diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
new file mode 100644
index 0000000000..4234de160b
--- /dev/null
+++ b/.ci/Jenkinsfile
@@ -0,0 +1,81 @@
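+// Go versions for the CI build/test matrix; the last entry doubles as the default toolchain image.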
+def golang = ['1.23', '1.24']
+def golangDefault = "golang:${golang.last()}"
+
+async {
+
+ for (version in golang) {
+ def go = version
+
+ task("test/go${go}") {
+ container("golang:${go}") {
+ sh 'make test'
+ }
+ }
+
+ task("build/go${go}") {
+ container("golang:${go}") {
+ for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
+ sh """
+ make bin/frostfs-${app}
+ bin/frostfs-${app} --version
+ """
+ }
+ }
+ }
+ }
+
+ task('test/race') {
+ container(golangDefault) {
+ sh 'make test GOFLAGS="-count=1 -race"'
+ }
+ }
+
+ task('lint') {
+ container(golangDefault) {
+ sh 'make lint-install lint'
+ }
+ }
+
+ task('staticcheck') {
+ container(golangDefault) {
+ sh 'make staticcheck-install staticcheck-run'
+ }
+ }
+
+ task('gopls') {
+ container(golangDefault) {
+ sh 'make gopls-install gopls-run'
+ }
+ }
+
+ task('gofumpt') {
+ container(golangDefault) {
+ sh '''
+ make fumpt-install
+ make fumpt
+ git diff --exit-code --quiet
+ '''
+ }
+ }
+
+ task('vulncheck') {
+ container(golangDefault) {
+ sh '''
+ go install golang.org/x/vuln/cmd/govulncheck@latest
+ govulncheck ./...
+ '''
+ }
+ }
+
+ task('pre-commit') {
+ dockerfile("""
+ FROM ${golangDefault}
+ RUN apt update && \
+ apt install -y --no-install-recommends pre-commit
+ """) {
+ withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
+ sh 'pre-commit run --color=always --hook-stage=manual --all-files'
+ }
+ }
+ }
+}
diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm
index 09d66e6422..42aeebc482 100644
--- a/.docker/Dockerfile.adm
+++ b/.docker/Dockerfile.adm
@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ci b/.docker/Dockerfile.ci
new file mode 100644
index 0000000000..9ddd8de593
--- /dev/null
+++ b/.docker/Dockerfile.ci
@@ -0,0 +1,25 @@
+FROM golang:1.23
+
+WORKDIR /tmp
+
+# Install apt packages
+RUN apt-get update && apt-get install --no-install-recommends -y \
+ pip \
+ && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
+ && rm -rf /var/lib/apt/lists/*
+
+# Dash → Bash
+RUN echo "dash dash/sh boolean false" | debconf-set-selections
+RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash
+
+RUN useradd -u 1234 -d /home/ci -m ci
+USER ci
+
+ENV PATH="$PATH:/home/ci/.local/bin"
+
+COPY .pre-commit-config.yaml .
+
+RUN pip install "pre-commit==3.1.1" \
+ && git init . \
+ && pre-commit install-hooks \
+ && rm -rf /tmp/*
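+
+# Example local build of this CI image (the tag name is an assumption):
+#   docker build -f .docker/Dockerfile.ci -t frostfs-node-ci .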
diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli
index c706359b34..16f1300562 100644
--- a/.docker/Dockerfile.cli
+++ b/.docker/Dockerfile.cli
@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir
index 9f8e723860..c119f8127a 100644
--- a/.docker/Dockerfile.ir
+++ b/.docker/Dockerfile.ir
@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage
index 39eb195595..854f7adeab 100644
--- a/.docker/Dockerfile.storage
+++ b/.docker/Dockerfile.storage
@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.23 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.storage-testnet b/.docker/Dockerfile.storage-testnet
deleted file mode 100644
index 908ff0aadf..0000000000
--- a/.docker/Dockerfile.storage-testnet
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM golang:1.18 as builder
-ARG BUILD=now
-ARG VERSION=dev
-ARG REPO=repository
-WORKDIR /src
-COPY . /src
-
-RUN make bin/frostfs-node
-
-# Executable image
-FROM alpine AS frostfs-node
-RUN apk add --no-cache bash
-
-WORKDIR /
-
-COPY --from=builder /src/bin/frostfs-node /bin/frostfs-node
-COPY --from=builder /src/config/testnet/config.yml /config.yml
-
-CMD ["frostfs-node", "--config", "/config.yml"]
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md
similarity index 76%
rename from .github/ISSUE_TEMPLATE/bug_report.md
rename to .forgejo/ISSUE_TEMPLATE/bug_report.md
index f1653e3f45..fb169997c3 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.forgejo/ISSUE_TEMPLATE/bug_report.md
@@ -2,7 +2,7 @@
name: Bug report
about: Create a report to help us improve
title: ''
-labels: community, triage
+labels: community, triage, bug
assignees: ''
---
@@ -18,8 +18,11 @@ assignees: ''
If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
-
+
## Steps to Reproduce (for bugs)
[SVG residue: FrostFS Storage component diagram — a User sends gRPC requests; the Sign Service checks request signatures; the APE Service performs access control; the Object service executes operations; the NeoGo blockchain container hosts the FrostFS ID (namespaces and users) and Policy (APE rules) smart contracts]
diff --git a/docs/images/authentication/impersonate.puml b/docs/images/authentication/impersonate.puml
new file mode 100644
index 0000000000..f0a5436f96
--- /dev/null
+++ b/docs/images/authentication/impersonate.puml
@@ -0,0 +1,15 @@
+@startuml impersonate
+start
+
+if (The request has bearer token with allow_impersonate=true?) then (yes)
+ :Treat bearer token issuer as the request owner.;
+ end
+(no) elseif (The request has session token?) then (yes)
+ :Treat session token issuer as the request owner.;
+ end
+else (no)
+ :Determine request owner from the request signature.;
+ end
+endif
+
+@enduml
diff --git a/docs/images/authentication/impersonate.svg b/docs/images/authentication/impersonate.svg
new file mode 100644
index 0000000000..add2c5439f
--- /dev/null
+++ b/docs/images/authentication/impersonate.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/release-instruction.md b/docs/release-instruction.md
index ec7b8cdf34..aa867e83cc 100644
--- a/docs/release-instruction.md
+++ b/docs/release-instruction.md
@@ -9,7 +9,7 @@ These should run successfully:
* `make lint` (should not change any files);
* `make fmts` (should not change any files);
* `go mod tidy` (should not change any files);
-* integration tests in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv).
+* integration tests in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env).
## Make release commit
@@ -43,11 +43,6 @@ Write new revision number into the root `VERSION` file:
$ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION
```
-Update version in Debian package changelog file
-```
-$ cat debian/changelog
-```
-
Update the supported version of `TrueCloudLab/frostfs-contract` module in root
`README.md` if needed.
@@ -60,7 +55,7 @@ Add an entry to the `CHANGELOG.md` following the style established there.
* update `Unreleased...new` and `new...old` diff-links at the bottom of the file
* add optional codename and release date in the heading
* remove all empty sections such as `Added`, `Removed`, etc.
-* make sure all changes have references to GitHub issues in `#123` format (if possible)
+* make sure all changes have references to relevant issues in `#123` format (if possible)
* clean up all `Unreleased` sections and leave them empty
### Make release commit
@@ -100,35 +95,31 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release
-### Prepare and push images to a Docker Hub (if not automated)
+### Prepare and push images to a Docker registry (automated)
-Create Docker images for all applications and push them into Docker Hub
-(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
+Create Docker images for all applications and push them into a container registry
+(executed automatically in Forgejo Actions upon pushing a release tag):
```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images
-$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
+$ make push-images
```
-### Make a proper GitHub release (if not automated)
+### Make a proper release (if not automated)
-Edit an automatically-created release on GitHub, copy things from `CHANGELOG.md`.
+Edit an automatically-created release on git.frostfs.info, copy things from `CHANGELOG.md`.
Build and tar release binaries with `make prepare-release`, attach them to
the release. Publish the release.
### Update FrostFS Developer Environment
-Prepare pull-request in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv)
+Prepare pull-request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
with new versions.
-### Close GitHub milestone
+### Close milestone
-Look up GitHub [milestones](https://github.com/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
+Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if it exists.
### Rebuild FrostFS LOCODE database
diff --git a/docs/shard-modes.md b/docs/shard-modes.md
index 7fca33d093..6cc4ab13c8 100644
--- a/docs/shard-modes.md
+++ b/docs/shard-modes.md
@@ -14,7 +14,16 @@ Each mode is characterized by two important properties:
| `read-only` | Read-only mode, only read operations are allowed, metabase is available. |
| `degraded` | Degraded mode in which metabase and write-cache is disabled. It shouldn't be used at all, because metabase can contain important indices, such as LOCK objects info and modifying operation in this mode can lead to unexpected behaviour. The purpose of this mode is to allow PUT/DELETE operations without the metabase if really necessary. |
| `degraded-read-only` | Same as `degraded`, but with only read operations allowed. This mode is used during SSD replacement and/or when the metabase error counter exceeds threshold. |
-| `disabled` | Currently used only in config file to temporarily disable a shard. |
+| `disabled` | Currently used only in config file to temporarily disable a shard. |
+
+## Shard and Component Status
+
+| Shard Mode | Metabase Mode | Blobstore Mode | Writecache Mode | Pilorama Mode | Blobovnicza Tree Mode | FSTree Mode |
+|-----------------------|---------------|----------------|-----------------|---------------|-----------------------|-------------|
+| `Read-Write` | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE |
+| `Read-Only` | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY |
+| `Degraded-Read-Write` | CLOSED | READ_WRITE | CLOSED | CLOSED | READ_WRITE | READ_WRITE |
+| `Degraded-Read-Only` | CLOSED | READ_ONLY | CLOSED | CLOSED | READ_ONLY | READ_ONLY |
## Transition order
@@ -42,7 +51,10 @@ However, all mode changing operations are idempotent.
## Automatic mode changes
-Shard can automatically switch to a `degraded-read-only` mode in 3 cases:
-1. If the metabase was not available or couldn't be opened/initialized during shard startup.
-2. If shard error counter exceeds threshold.
-3. If the metabase couldn't be reopened during SIGHUP handling.
+A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
+
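+To return such a shard to `read-write`, change its mode manually (a sketch; the endpoint and shard ID are placeholders):
+
+```shell
+$ frostfs-cli control shards set-mode --endpoint localhost:8091 --id <shard_id> --mode read-write
+```
+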
+## Detach shard
+
+To detach a shard, use the `frostfs-cli control shards detach` command (sketched below). It removes the shards from the storage
+engine and closes all resources associated with them.
+Limitation: a `SIGHUP` or a storage node restart will bring detached shards back online.
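+
+A sketch of the invocation (the endpoint and shard ID are placeholders; see `frostfs-cli control shards detach --help` for the exact flags):
+
+```shell
+$ frostfs-cli control shards detach --endpoint localhost:8091 --id <shard_id>
+```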
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 16737ab9f1..da9fdfed05 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -14,6 +14,7 @@ There are some custom types used for brevity:
| Section | Description |
|--------------|---------------------------------------------------------|
+| `node` | [Node parameters](#node-section) |
| `logger` | [Logging parameters](#logger-section) |
| `pprof` | [PProf configuration](#pprof-section) |
| `prometheus` | [Prometheus metrics configuration](#prometheus-section) |
@@ -24,7 +25,10 @@ There are some custom types used for brevity:
| `policer` | [Policer service configuration](#policer-section) |
| `replicator` | [Replicator service configuration](#replicator-section) |
| `storage` | [Storage engine configuration](#storage-section) |
-
+| `runtime` | [Runtime configuration](#runtime-section) |
+| `audit` | [Audit configuration](#audit-section) |
+| `multinet` | [Multinet configuration](#multinet-section) |
+| `qos` | [QoS configuration](#qos-section) |
# `control` section
```yaml
@@ -75,13 +79,23 @@ element.
Contains configuration for the `pprof` profiler.
-| Parameter | Type | Default value | Description |
-|--------------------|------------|---------------|-----------------------------------------|
-| `enabled` | `bool` | `false` | Flag to enable the service. |
-| `address` | `string` | | Address that service listener binds to. |
-| `shutdown_timeout` | `duration` | `30s` | Time to wait for a graceful shutdown. |
+| Parameter | Type | Default value | Description |
+|--------------------|-----------------------------------|---------------|-----------------------------------------|
+| `enabled` | `bool` | `false` | Flag to enable the service. |
+| `address` | `string` | | Address that service listener binds to. |
+| `shutdown_timeout` | `duration` | `30s` | Time to wait for a graceful shutdown. |
+| `debug` | [Debug config](#debug-subsection) | | Optional profiles configuration |
+## `debug` subsection
+
+Contains optional profiles configuration.
+
+| Parameter | Type | Default value | Description |
+|--------------|-------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `block_rate` | `int` | `0` | Controls the block profiler. Non-positive values disable profiler reports. For more information: https://pkg.go.dev/runtime@go1.20.3#SetBlockProfileRate. |
+| `mutex_rate` | `int` | `0` | Controls the mutex profiler. Non-positive values disable profiler reports. For more information: https://pkg.go.dev/runtime@go1.20.3#SetMutexProfileFraction. |
+
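+An illustrative configuration enabling both profilers (the address and rates are example values; non-positive rates disable reports):
+
+```yaml
+pprof:
+  enabled: true
+  address: localhost:6060
+  shutdown_timeout: 15s
+  debug:
+    block_rate: 10000
+    mutex_rate: 10000
+```
+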
# `prometheus` section
Contains configuration for the `prometheus` metrics service.
@@ -98,11 +112,21 @@ Contains logger parameters.
```yaml
logger:
level: info
+ tags:
+ - names: "main, morph"
+ level: debug
```
-| Parameter | Type | Default value | Description |
-|-----------|----------|---------------|---------------------------------------------------------------------------------------------------|
-| `level` | `string` | `info` | Logging level.<br>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
+| Parameter | Type | Default value | Description |
+|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------|
+| `level` | `string` | `info` | Logging level.<br>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
+| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tag descriptions. |
+
+## `tags` subsection
+| Parameter | Type | Default value | Description |
+|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `names` | `string` | | List of components divided by `,`.<br>Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. |
+| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. |
# `contracts` section
Contains override values for FrostFS side-chain contract hashes. Most of the time contract
@@ -113,18 +137,14 @@ contracts:
balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd
container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c
netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca
- reputation: 441995f631c1da2b133462b71859494a5cd45e90
proxy: ad7c6b55b737b696e5c82c85445040964a03e97f
```
| Parameter | Type | Default value | Description |
|--------------|-----------|---------------|---------------------------|
-| `audit` | `hash160` | | Audit contract hash. |
| `balance` | `hash160` | | Balance contract hash. |
| `container` | `hash160` | | Container contract hash. |
| `netmap` | `hash160` | | Netmap contract hash. |
-| `reputation` | `hash160` | | Reputation contract hash. |
-| `subnet` | `hash160` | | Subnet contract hash. |
# `morph` section
@@ -132,20 +152,26 @@ contracts:
morph:
dial_timeout: 30s
cache_ttl: 15s
+ ape_chain_cache_size: 10000
rpc_endpoint:
- address: wss://rpc1.morph.frostfs.info:40341/ws
priority: 1
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
+ netmap:
+ candidates:
+ poll_interval: 20s
```
-| Parameter | Type | Default value | Description |
-|-------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
-| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br>Negative value disables caching.<br>Cached entities: containers, container lists, eACL tables. |
-| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
-| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
+| Parameter | Type | Default value | Description |
+|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
+| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br>Negative value disables caching.<br>Cached entities: containers, container lists, eACL tables. |
+| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
+| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
+| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
+| `netmap.candidates.poll_interval` | `duration` | `20s` | Polling interval for merging netmap candidates with the netmap kept in the local cache. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@@ -159,8 +185,8 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
-| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
+| `low_mem` | `bool` | `false` | Reduce memory consumption at the cost of performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
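+
+An illustrative outline (example values; the numbered keys select individual shards, see the subsection below):
+
+```yaml
+storage:
+  shard_ro_error_threshold: 100
+  low_mem: false
+  shard:
+    default:
+      resync_metabase: false
+    0:
+      mode: read-write
+```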
## `shard` subsection
@@ -169,17 +195,41 @@ Contains configuration for each shard. Keys must be consecutive numbers starting
`default` subsection has the same format and specifies defaults for missing values.
The following table describes configuration for each shard.
-| Parameter | Type | Default value | Description |
-|-------------------------------------|---------------------------------------------|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `compress` | `bool` | `false` | Flag to enable compression. |
-| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
-| `mode` | `string` | `read-write` | Shard Mode.<br>Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
-| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
-| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
-| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
-| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
-| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
-| `gc` | [GC config](#gc-subsection) | | GC configuration. |
+| Parameter | Type | Default value | Description |
+| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- |
+| `compression` | [Compression config](#compression-subsection) | | Compression config. |
+| `mode` | `string` | `read-write` | Shard Mode.<br>Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
+| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
+| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
+| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
+| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
+| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
+| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
+| `gc` | [GC config](#gc-subsection) | | GC configuration. |
+| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
+
+### `compression` subsection
+
+Contains compression config.
+
+```yaml
+compression:
+ enabled: true
+ level: smallest_size
+ exclude_content_types:
+ - audio/*
+ - video/*
+ estimate_compressibility: true
+ estimate_compressibility_threshold: 0.7
+```
+
+| Parameter | Type | Default value | Description |
+| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | `false` | Flag to enable compression. |
+| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. |
+| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
+| `estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data. |
+| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data is compressed if the estimate is greater than this value. |
### `blobstor` subsection
@@ -194,11 +244,13 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
- perm: 0644
+ perm: 0o644
size: 4194304
depth: 1
width: 4
opened_cache_capacity: 50
+ opened_cache_ttl: 5m
+ opened_cache_exp_interval: 15s
```
#### Common options for sub-storages
@@ -215,14 +267,18 @@ blobstor:
| `depth` | `int` | `4` | File-system tree depth. |
#### `blobovnicza` type options
-| Parameter | Type | Default value | Description |
-|-------------------------|-----------|---------------|-------------------------------------------------------|
-| `path` | `string` | | Path to the root of the blobstor. |
-| `perm` | file mode | `0660` | Default permission for created files and directories. |
-| `size` | `size` | `1 G` | Maximum size of a single blobovnicza |
-| `depth` | `int` | `2` | Blobovnicza tree depth. |
-| `width` | `int` | `16` | Blobovnicza tree width. |
-| `opened_cache_capacity` | `int` | `16` | Maximum number of simultaneously opened blobovniczas. |
+| Parameter | Type | Default value | Description |
+|-----------------------------| ---------- |---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `path` | `string` | | Path to the root of the blobstor. |
+| `perm` | file mode | `0660` | Default permission for created files and directories. |
+| `size` | `size` | `1 G` | Maximum size of a single blobovnicza |
+| `depth` | `int` | `2` | Blobovnicza tree depth. |
+| `width` | `int` | `16` | Blobovnicza tree width. |
+| `opened_cache_capacity` | `int` | `16` | Maximum number of simultaneously opened blobovniczas. |
+| `opened_cache_ttl` | `duration` | `0` | TTL in cache for opened blobovniczas (disabled by default). Under heavy random reads with 10 shards of 10_000 databases each and 400 objects accessed per second, each database is accessed approximately once per 10 * 10_000 / 400 = 250 seconds <= 300 seconds = 5 min. Also keep in mind that in this scenario databases will probably be closed earlier because of the cache capacity, so bigger values are likely to be of no use. |
+| `opened_cache_exp_interval` | `duration` | `15s` | Cache cleanup interval for expired blobovniczas. |
+| `init_worker_count` | `int` | `5` | Maximum number of concurrent initialization workers. |
+| `rebuild_drop_timeout` | `duration` | `10s` | Timeout before dropping an empty blobovnicza file during rebuild. |
### `gc` subsection
@@ -232,19 +288,23 @@ Contains garbage-collection service configuration. It iterates over the blobstor
gc:
remover_batch_size: 200
remover_sleep_interval: 5m
+ expired_collector_batch_size: 500
+ expired_collector_worker_count: 5
```
-| Parameter | Type | Default value | Description |
-|--------------------------|------------|---------------|----------------------------------------------|
-| `remover_batch_size` | `int` | `100` | Amount of objects to grab in a single batch. |
-| `remover_sleep_interval` | `duration` | `1m` | Time to sleep between iterations. |
+| Parameter | Type | Default value | Description |
+|-----------------------------------|------------|---------------|----------------------------------------------------------|
+| `remover_batch_size` | `int` | `100` | Amount of objects to grab in a single batch. |
+| `remover_sleep_interval` | `duration` | `1m` | Time to sleep between iterations. |
+| `expired_collector_batch_size` | `int` | `500` | Max amount of expired objects to grab in a single batch. |
+| `expired_collector_worker_count` | `int` | `5` | Max amount of concurrent expired objects workers. |
### `metabase` subsection
```yaml
metabase:
path: /path/to/meta.db
- perm: 0644
+ perm: 0o644
max_batch_size: 200
max_batch_delay: 20ms
```
@@ -263,21 +323,78 @@ writecache:
enabled: true
path: /path/to/writecache
capacity: 4294967296
- small_object_size: 16384
max_object_size: 134217728
- workers_number: 30
+ flush_worker_count: 30
```
-| Parameter | Type | Default value | Description |
-|----------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------|
-| `path` | `string` | | Path to the metabase file. |
-| `capacity` | `size` | unrestricted | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
-| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
-| `workers_number` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
-| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
-| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
+| Parameter | Type | Default value | Description |
+| --------------------------- | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `path` | `string` | | Path to the write-cache directory. |
+| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `max_object_count` | `int` | unrestricted | Approximate maximum number of objects in the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
+| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
+| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
+### `limits` subsection
+
+```yaml
+limits:
+ max_read_running_ops: 10000
+ max_read_waiting_ops: 1000
+ max_write_running_ops: 1000
+ max_write_waiting_ops: 100
+ read:
+ - tag: internal
+ weight: 20
+ limit_ops: 0
+ reserved_ops: 1000
+ - tag: client
+ weight: 70
+ reserved_ops: 10000
+ - tag: background
+ weight: 5
+ limit_ops: 10000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 5
+ limit_ops: 25000
+ - tag: policer
+ weight: 5
+ limit_ops: 25000
+ write:
+ - tag: internal
+ weight: 200
+ limit_ops: 0
+ reserved_ops: 100
+ - tag: client
+ weight: 700
+ reserved_ops: 1000
+ - tag: background
+ weight: 50
+ limit_ops: 1000
+ reserved_ops: 0
+ - tag: writecache
+ weight: 50
+ limit_ops: 2500
+ - tag: policer
+ weight: 50
+ limit_ops: 2500
+```
+
+| Parameter | Type | Default value | Description |
+| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
+| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. |
+| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
+| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
+| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
+| `read` | `[]tag` | empty | Array of shard read settings for tags. |
+| `write` | `[]tag` | empty | Array of shard write settings for tags. |
+| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
+| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. |
+| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
+| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
+| `tag.prohibited` | `bool` | false | If true, operations with the specified tag are prohibited. |
# `node` section
@@ -293,37 +410,22 @@ node:
- "Price:11"
- "UN-LOCODE:RU MSK"
- "key:value"
- relay: false
persistent_sessions:
path: /sessions
persistent_state:
path: /state
- subnet:
- exit_zero: false
- entries:
- - 123
- notification:
- enabled: true
- endpoint: tls://localhost:4222
- timeout: 6s
- default_topic: topic
- certificate: /path/to/cert.pem
- key: /path/to/key.pem
- ca: /path/to/ca.pem
+ locode_db_path: "/path/to/locode/db"
```
-| Parameter | Type | Default value | Description |
-|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------|
-| `key` | `string` | | Path to the binary-encoded private key. |
-| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
-| `addresses` | `[]string` | | Addresses advertised in the netmap. |
-| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
-| `relay` | `bool` | | Enable relay mode. |
-| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
-| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
-| `subnet` | [Subnet config](#subnet-subsection) | | Subnet configuration. |
-| `notification` | [Notification config](#notification-subsection) | | NATS configuration. |
-
+| Parameter | Type | Default value | Description |
+|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------|
+| `key` | `string` | | Path to the binary-encoded private key. |
+| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
+| `addresses` | `[]string` | | Addresses advertised in the netmap. |
+| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
+| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
+| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
+| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. |
## `wallet` subsection
N3 wallet configuration.
@@ -350,27 +452,6 @@ It is used to correctly handle node restarts or crashes.
|-----------|----------|------------------------|------------------------|
| `path` | `string` | `.frostfs-storage-state` | Path to the database. |
-## `subnet` subsection
-This is an advanced section, use with caution.
-
-| Parameter | Type | Default value | Description |
-|-------------|------------|---------------|------------------------------------------------------|
-| `exit_zero` | `bool` | `false` | Exit from the default subnet. |
-| `entries` | `[]uint32` | | List of non-default subnet ID this node belongs to. |
-
-## `notification` subsection
-This is an advanced section, use with caution.
-
-| Parameter | Type | Default value | Description |
-|-----------------|------------|-------------------|-------------------------------------------------------------------|
-| `enabled` | `bool` | `false` | Flag to enable the service. |
-| `endpoint` | `string` | | NATS endpoint to connect to. |
-| `timeout` | `duration` | `5s` | Timeout for the object notification operation. |
-| `default_topic` | `string` | node's public key | Default topic to use if an object has no corresponding attribute. |
-| `certificate` | `string` | | Path to the client certificate. |
-| `key` | `string` | | Path to the client key. |
-| `ca` | `string` | | Override root CA used to verify server certificates. |
-
# `apiclient` section
Configuration for the FrostFS API client used for communication with other FrostFS nodes.
@@ -409,22 +490,113 @@ replicator:
pool_size: 10
```
-| Parameter | Type | Default value | Description |
-|---------------|------------|----------------------------------------|---------------------------------------------|
-| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
-| `pool_size` | `int` | Equal to `object.put.pool_size_remote` | Maximum amount of concurrent replications. |
+| Parameter | Type | Default value | Description |
+|---------------|------------|---------------|---------------------------------------------|
+| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
+| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. |
# `object` section
Contains object-service related parameters.
```yaml
object:
- put:
- pool_size_remote: 100
+ get:
+ priority:
+ - $attribute:ClusterName
```
-| Parameter | Type | Default value | Description |
-|-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------|
-| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
-| `put.pool_size_remote` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
-| `put.pool_size_local` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
+| Parameter | Type | Default value | Description |
+|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------|
+| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
+| `get.priority` | `[]string` | | List of node metrics used for prioritization when computing responses to `GET` requests. |
+
+
+# `rpc` section
+Contains limits on the number of active RPCs for the specified method(s).
+
+```yaml
+rpc:
+ limits:
+ - methods:
+ - /neo.fs.v2.object.ObjectService/PutSingle
+ - /neo.fs.v2.object.ObjectService/Put
+ max_ops: 1000
+ - methods:
+ - /neo.fs.v2.object.ObjectService/Get
+ max_ops: 10000
+```
+
+| Parameter | Type | Default value | Description |
+|------------------|------------|---------------|--------------------------------------------------------------|
+| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s). |
+| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit. |
+
+# `runtime` section
+Contains runtime parameters.
+
+```yaml
+runtime:
+ soft_memory_limit: 1GB
+```
+
+| Parameter | Type | Default value | Description |
+|---------------------|--------|---------------|--------------------------------------------------------------------------|
+| `soft_memory_limit` | `size` | 0 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |
+
+# `audit` section
+Contains audit parameters.
+
+```yaml
+audit:
+ enabled: true
+```
+
+| Parameter | Type | Default value | Description |
+|-----------|--------|---------------|---------------------------------------------------|
+| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. |
+
+
+# `multinet` section
+Contains multinet parameters.
+
+```yaml
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
+```
+
+| Parameter | Type | Default value | Description |
+| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | false | If `true` then source-based routing is enabled. |
+| `subnets` | `subnet` | empty | Resulting subnets. |
+| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
+| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
+| `fallback_delay` | `duration` | 350ms | Delay before falling back to secondary IP addresses when resolving a hostname. |
+
+# `qos` section
+```yaml
+qos:
+ critical:
+ authorized_keys:
+ - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
+ - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
+ internal:
+ authorized_keys:
+ - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
+ - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
+```
+| Parameter | Type | Default value | Description |
+| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- |
+| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
+| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |
diff --git a/docs/update-go-instruction.md b/docs/update-go-instruction.md
index f992250463..195e0c6b3e 100644
--- a/docs/update-go-instruction.md
+++ b/docs/update-go-instruction.md
@@ -7,7 +7,7 @@
## Update CI
Change Golang versions for unit test in CI.
-There is `go` section in `.github/workflows/go.yaml` file:
+There is a `go` section in the `.forgejo/workflows/*.yml` files:
```yaml
jobs:
test:
diff --git a/go.mod b/go.mod
index 61cd576475..6f19509369 100644
--- a/go.mod
+++ b/go.mod
@@ -1,110 +1,137 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
-go 1.18
+go 1.23.0
require (
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230307104236-f69d2ad83c51
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230307124721-94476f905599
- git.frostfs.info/TrueCloudLab/hrw v1.2.0
+ code.gitea.io/sdk/gitea v0.17.1
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
+ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
+ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
+ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
+ git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa
+ git.frostfs.info/TrueCloudLab/hrw v1.2.1
+ git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
+ git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
+ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
+ github.com/VictoriaMetrics/easyproto v0.1.4
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+ github.com/felixge/fgprof v0.9.5
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
- github.com/google/go-github/v39 v39.2.0
- github.com/google/uuid v1.3.0
- github.com/hashicorp/golang-lru/v2 v2.0.1
- github.com/klauspost/compress v1.15.13
- github.com/mitchellh/go-homedir v1.1.0
+ github.com/gdamore/tcell/v2 v2.7.4
+ github.com/go-pkgz/expirable-cache/v3 v3.0.0
+ github.com/google/uuid v1.6.0
+ github.com/hashicorp/golang-lru/v2 v2.0.7
+ github.com/klauspost/compress v1.17.4
+ github.com/mailru/easyjson v0.7.7
github.com/mr-tron/base58 v1.2.0
- github.com/multiformats/go-multiaddr v0.8.0
- github.com/nats-io/nats.go v1.22.1
- github.com/nspcc-dev/neo-go v0.100.1
- github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect
+ github.com/multiformats/go-multiaddr v0.15.0
+ github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5
- github.com/panjf2000/ants/v2 v2.4.0
- github.com/paulmach/orb v0.2.2
- github.com/prometheus/client_golang v1.13.0
- github.com/spf13/cast v1.5.0
- github.com/spf13/cobra v1.6.1
+ github.com/panjf2000/ants/v2 v2.9.0
+ github.com/prometheus/client_golang v1.19.0
+ github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130
+ github.com/spf13/cast v1.6.0
+ github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.14.0
- github.com/stretchr/testify v1.8.1
- go.etcd.io/bbolt v1.3.6
- go.uber.org/atomic v1.10.0
- go.uber.org/zap v1.24.0
- golang.org/x/term v0.3.0
- google.golang.org/grpc v1.51.0
- google.golang.org/protobuf v1.28.1
+ github.com/spf13/viper v1.19.0
+ github.com/ssgreg/journald v1.0.0
+ github.com/stretchr/testify v1.9.0
+ go.etcd.io/bbolt v1.3.10
+ go.opentelemetry.io/otel v1.31.0
+ go.opentelemetry.io/otel/trace v1.31.0
+ go.uber.org/zap v1.27.0
+ golang.org/x/sync v0.12.0
+ golang.org/x/sys v0.31.0
+ golang.org/x/term v0.30.0
+ google.golang.org/grpc v1.69.2
+ google.golang.org/protobuf v1.36.1
gopkg.in/yaml.v3 v3.0.1
)
-require golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
+require (
+ github.com/sagikazarmark/locafero v0.6.0 // indirect
+ github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+ github.com/sourcegraph/conc v0.3.0 // indirect
+)
require (
- git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 // indirect
- github.com/benbjohnson/clock v1.1.0 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
- github.com/fsnotify/fsnotify v1.6.0 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/golang/snappy v0.0.3 // indirect
- github.com/google/go-querystring v1.1.0 // indirect
- github.com/gorilla/websocket v1.4.2 // indirect
- github.com/hashicorp/golang-lru v0.6.0 // indirect
+ github.com/bits-and-blooms/bitset v1.13.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/consensys/bavard v0.1.13 // indirect
+ github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
+ github.com/davidmz/go-pageant v1.0.2 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/gdamore/encoding v1.0.0 // indirect
+ github.com/go-fed/httpsig v1.1.0 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
+ github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
+ github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+ github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/holiman/uint256 v1.2.0 // indirect
- github.com/inconshreveable/mousetrap v1.0.1 // indirect
- github.com/ipfs/go-cid v0.3.2 // indirect
- github.com/klauspost/cpuid/v2 v2.2.2 // indirect
- github.com/magiconair/properties v1.8.6 // indirect
- github.com/mattn/go-runewidth v0.0.9 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/minio/sha256-simd v1.0.0 // indirect
+ github.com/holiman/uint256 v1.2.4 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/ipfs/go-cid v0.5.0 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/klauspost/reedsolomon v1.12.1 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+ github.com/magiconair/properties v1.8.7 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
- github.com/multiformats/go-multibase v0.1.1 // indirect
- github.com/multiformats/go-multihash v0.2.1 // indirect
+ github.com/multiformats/go-multibase v0.2.0 // indirect
+ github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
- github.com/nats-io/nats-server/v2 v2.7.4 // indirect
- github.com/nats-io/nkeys v0.3.0 // indirect
- github.com/nats-io/nuid v1.0.1 // indirect
- github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
- github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
- github.com/pelletier/go-toml v1.9.5 // indirect
- github.com/pelletier/go-toml/v2 v2.0.5 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
+ github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect
+ github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.48.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
- github.com/spf13/afero v1.9.2 // indirect
- github.com/spf13/jwalterweatherman v1.1.0 // indirect
- github.com/subosito/gotenv v1.4.1 // indirect
- github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 // indirect
- github.com/twmb/murmur3 v1.1.5 // indirect
- github.com/urfave/cli v1.22.5 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/crypto v0.4.0 // indirect
- golang.org/x/net v0.4.0 // indirect
- golang.org/x/sync v0.1.0 // indirect
- golang.org/x/sys v0.3.0 // indirect
- golang.org/x/text v0.5.0 // indirect
- golang.org/x/time v0.1.0 // indirect
- google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
+ github.com/spf13/afero v1.11.0 // indirect
+ github.com/subosito/gotenv v1.6.0 // indirect
+ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+ github.com/twmb/murmur3 v1.1.8 // indirect
+ github.com/urfave/cli v1.22.14 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/metric v1.31.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/crypto v0.36.0 // indirect
+ golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
+ golang.org/x/net v0.30.0 // indirect
+ golang.org/x/text v0.23.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
- lukechampine.com/blake3 v1.1.7 // indirect
+ lukechampine.com/blake3 v1.4.0 // indirect
+ rsc.io/tmplfunc v0.0.3 // indirect
)
-retract (
- v1.22.1 // Contains retraction only.
- v1.22.0 // Published accidentally.
-)
+replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07
diff --git a/go.sum b/go.sum
index 93f805d21f..5b075f60a5 100644
--- a/go.sum
+++ b/go.sum
@@ -1,102 +1,49 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230307104236-f69d2ad83c51 h1:l4+K1hN+NuWNtlZZoV8yRRP3Uu7PifL05ukEqKcb0Ks=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230307104236-f69d2ad83c51/go.mod h1:n0DxKYulu2Ar73R6OcNF34LiL/Xa+iDR7GZuaOChbLE=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
+code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
+code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230307124721-94476f905599 h1:mzGX2RX8R8H/tUqrUu1TcYk4QRDBcBIWGYscPncfLOQ=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230307124721-94476f905599/go.mod h1:z7zcpGY+puI5puyy5oyFbf20vWp84WtslCxcr6/kv5c=
-git.frostfs.info/TrueCloudLab/hrw v1.2.0 h1:KvAES7xIqmQBGd2q8KanNosD9+4BhU/zqD5Kt5KSflk=
-git.frostfs.info/TrueCloudLab/hrw v1.2.0/go.mod h1:mq2sbvYfO+BB6iFZwYBkgC0yc6mJNx+qZi4jW918m+Y=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs=
+git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
+git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/CityOfZion/neo-go v0.62.1-pre.0.20191114145240-e740fbe708f8/go.mod h1:MJCkWUBhi9pn/CrYO1Q3P687y2KeahrOPS9BD9LDGb0=
-github.com/CityOfZion/neo-go v0.70.1-pre.0.20191209120015-fccb0085941e/go.mod h1:0enZl0az8xA6PVkwzEOwPWVJGqlt/GO4hA4kmQ5Xzig=
-github.com/CityOfZion/neo-go v0.70.1-pre.0.20191212173117-32ac01130d4c/go.mod h1:JtlHfeqLywZLswKIKFnAp+yzezY4Dji9qlfQKB2OD/I=
-github.com/CityOfZion/neo-go v0.71.1-pre.0.20200129171427-f773ec69fb84/go.mod h1:FLI526IrRWHmcsO+mHsCbj64pJZhwQFTLJZu+A4PGOA=
-github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
-github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg=
-github.com/abiosoft/ishell/v2 v2.0.2/go.mod h1:E4oTCXfo6QjoCart0QYa5m9w4S+deXs/P/9jA77A9Bs=
-github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530=
-github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
-github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210521073959-f0d4d129b7f1/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 h1:npHgfD4Tl2WJS3AJaMUi5ynGDPUBfkg3U3fCzDyXZ+4=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
+git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
+github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
+github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
+github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
-github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
-github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
-github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
-github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
-github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
-github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
+github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
+github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -106,805 +53,391 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 h1:tYj5Ydh5D7Xg2R1tJnoG36Yta7NVB8C0vx36oPA3Bbw=
+github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
-github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
+github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
+github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
+github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
-github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-redis/redis v6.10.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU=
+github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg=
+github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
+github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw=
+github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ=
-github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
-github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
-github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
-github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
-github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
-github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
+github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
+github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
-github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
-github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
-github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0=
-github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.2 h1:xPMwiykqNK9VK0NYC3+jTMYv9I6Vl3YdjZgPZKG3zO0=
-github.com/klauspost/cpuid/v2 v2.2.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q=
+github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
-github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
+github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
-github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
-github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
-github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
-github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
-github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
+github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo=
+github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY=
-github.com/nats-io/nats-server/v2 v2.7.4 h1:c+BZJ3rGzUKCBIM4IXO8uNT2u1vajGbD1kPA6wqCEaM=
-github.com/nats-io/nats-server/v2 v2.7.4/go.mod h1:1vZ2Nijh8tcyNe8BDVyTviCd9NYzRbubQYiEHsvOQWc=
-github.com/nats-io/nats.go v1.22.1 h1:XzfqDspY0RNufzdrB8c4hFR+R3dahkxlpWe5+IWJzbE=
-github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
-github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
-github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
-github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/nspcc-dev/dbft v0.0.0-20191205084618-dacb1a30c254/go.mod h1:w1Ln2aT+dBlPhLnuZhBV+DfPEdS2CHWWLp5JTScY3bw=
-github.com/nspcc-dev/dbft v0.0.0-20191209120240-0d6b7568d9ae/go.mod h1:3FjXOoHmA51EGfb5GS/HOv7VdmngNRTssSeQ729dvGY=
-github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk=
-github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ=
-github.com/nspcc-dev/dbft v0.0.0-20210721160347-1b03241391ac/go.mod h1:U8MSnEShH+o5hexfWJdze6uMFJteP0ko7J2frO7Yu1Y=
-github.com/nspcc-dev/dbft v0.0.0-20220902113116-58a5e763e647/go.mod h1:g9xisXmX9NP9MjioaTe862n9SlZTrP+6PVUWLBYOr98=
-github.com/nspcc-dev/go-ordered-json v0.0.0-20210915112629-e1b6cce73d02/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
-github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 h1:n4ZaFCKt1pQJd7PXoMJabZWK9ejjbLOVrkl/lOUmshg=
-github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
-github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkPG06MU=
-github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
-github.com/nspcc-dev/neo-go v0.98.0/go.mod h1:E3cc1x6RXSXrJb2nDWXTXjnXk3rIqVN8YdFyWv+FrqM=
-github.com/nspcc-dev/neo-go v0.99.4/go.mod h1:mKTolfRUfKjFso5HPvGSQtUZc70n0VKBMs16eGuC5gA=
-github.com/nspcc-dev/neo-go v0.100.1 h1:yugxbQRdzM+ObVa5mtr9/n4rYjxSIrryne8MVr9NBwU=
-github.com/nspcc-dev/neo-go v0.100.1/go.mod h1:Nnp7F4e9IBccsgtCeLtUWV+0T6gk1PtP5HRtA13hUfc=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220927123257-24c107e3a262/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb h1:GFxfkpXEYAbMIr69JpKOsQWeLOaGrd49HNAor8uDW+A=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
-github.com/nspcc-dev/neofs-api-go/v2 v2.11.0-pre.0.20211201134523-3604d96f3fe1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
-github.com/nspcc-dev/neofs-api-go/v2 v2.11.1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
-github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
-github.com/nspcc-dev/neofs-crypto v0.2.3/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw=
-github.com/nspcc-dev/neofs-crypto v0.3.0/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw=
-github.com/nspcc-dev/neofs-crypto v0.4.0/go.mod h1:6XJ8kbXgOfevbI2WMruOtI+qUJXNwSGM/E9eClXxPHs=
-github.com/nspcc-dev/neofs-sdk-go v0.0.0-20211201182451-a5b61c4f6477/go.mod h1:dfMtQWmBHYpl9Dez23TGtIUKiFvCIxUZq/CkSIhEpz4=
-github.com/nspcc-dev/neofs-sdk-go v0.0.0-20220113123743-7f3162110659/go.mod h1:/jay1lr3w7NQd/VDBkEhkJmDmyPNsu4W+QV2obsUV40=
-github.com/nspcc-dev/rfc6979 v0.1.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
-github.com/nspcc-dev/rfc6979 v0.2.0 h1:3e1WNxrN60/6N0DW7+UYisLeZJyfqZTNOjeV/toYvOE=
-github.com/nspcc-dev/rfc6979 v0.2.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
+github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
+github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
+github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/panjf2000/ants/v2 v2.4.0 h1:embKPQeNWMRbnrRKURv4TXJwjQRWMEAfqZT6Pe5hZNc=
-github.com/panjf2000/ants/v2 v2.4.0/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
-github.com/paulmach/orb v0.2.2 h1:PblToKAbU0xHVypex/GdZfibA1CeCfN5s0UjxyWExdo=
-github.com/paulmach/orb v0.2.2/go.mod h1:FkcWtplUAIVqAuhAOV2d3rpbnQyliDOjOcLW9dUrfdU=
-github.com/paulmach/protoscan v0.2.1-0.20210522164731-4e53c6875432/go.mod h1:2sV+uZ/oQh66m4XJVZm5iqUZ62BN88Ex1E+TTS0nLzI=
-github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
-github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
-github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
-github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
+github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo=
+github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
-github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
+github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 h1:o1CYtoFOm6xJK3DvDAEG5wDJPLj+SoxUtUDFaQgt1iY=
+github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
-github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
-github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
-github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
-github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
+github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
-github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
-github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
-github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
-github.com/twmb/murmur3 v1.1.5 h1:i9OLS9fkuLzBXjt6dptlAEyk58fJsSTXbRg3SgVyqgk=
-github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
-github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 h1:JwtAtbp7r/7QSyGz8mKUbYJBg2+6Cd7OjM8o/GNOcVo=
-github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74/go.mod h1:RmMWU37GKR2s6pgrIEB4ixgpVCt/cf7dnJv3fuH1J1c=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
+github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
+github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
+github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
-github.com/yuin/gopher-lua v0.0.0-20191128022950-c6266f4fe8d7/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
-golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
+go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
-golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
-golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
-golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180318012157-96caea41033d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
-google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
+google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/abiosoft/ishell.v2 v2.0.0/go.mod h1:sFp+cGtH6o4s1FtpVPTMcHq2yue+c4DGOVohJCPUzwY=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
@@ -912,15 +445,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
-lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
+lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
+rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
+rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/help.mk b/help.mk
index c065ec862d..a2ac989dcb 100644
--- a/help.mk
+++ b/help.mk
@@ -8,4 +8,4 @@ help:
@echo ''
@echo ' Targets:'
@echo ''
- @awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9_-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
+ @awk '/^#/{ comment = substr($$0,3) } /^[a-zA-Z][a-zA-Z0-9_-]+:/{ print " ", $$1, comment; comment = "" }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
diff --git a/internal/ape/util.go b/internal/ape/util.go
new file mode 100644
index 0000000000..99eba95ba7
--- /dev/null
+++ b/internal/ape/util.go
@@ -0,0 +1,13 @@
+package ape
+
+import "regexp"
+
+var (
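+ // SubjectNameRegexp matches subject names: 1-64 characters from the set [\w+=,.@-].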
+ SubjectNameRegexp = regexp.MustCompile(`^[\w+=,.@-]{1,64}$`)
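+ // GroupNameRegexp matches group names: 1-128 characters from the same set.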
+ GroupNameRegexp = regexp.MustCompile(`^[\w+=,.@-]{1,128}$`)
+
+ // NamespaceNameRegexp is similar to https://git.frostfs.info/TrueCloudLab/frostfs-contract/src/commit/f2a82aa635aa57d9b05092d8cf15b170b53cc324/nns/nns_contract.go#L690
+ NamespaceNameRegexp = regexp.MustCompile(`(^$)|(^[a-z0-9]{1,2}$)|(^[a-z0-9][a-z0-9-]{1,48}[a-z0-9]$)`)
+)
diff --git a/internal/assert/cond.go b/internal/assert/cond.go
new file mode 100644
index 0000000000..113d2eba90
--- /dev/null
+++ b/internal/assert/cond.go
@@ -0,0 +1,33 @@
+package assert
+
+import (
+ "fmt"
+ "strings"
+)
+
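+// True panics with the joined details if cond is false.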
+func True(cond bool, details ...string) {
+ if !cond {
+ panic(strings.Join(details, " "))
+ }
+}
+
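+// False panics with the joined details if cond is true.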
+func False(cond bool, details ...string) {
+ if cond {
+ panic(strings.Join(details, " "))
+ }
+}
+
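+// NoError panics with a "BUG:"-prefixed message if err is non-nil.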
+func NoError(err error, details ...string) {
+ if err != nil {
+ content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
+ panic(content)
+ }
+}
+
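+// Fail unconditionally panics with the joined details.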
+func Fail(details ...string) {
+ panic(strings.Join(details, " "))
+}
diff --git a/internal/audit/consts.go b/internal/audit/consts.go
new file mode 100644
index 0000000000..f4fa19ab9c
--- /dev/null
+++ b/internal/audit/consts.go
@@ -0,0 +1,8 @@
+package audit
+
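+// Values substituted into audit log records when the real value is unknown, invalid, or empty.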
+const (
+ InvalidValue = "invalid_value"
+ NotDefined = "not_defined"
+ Empty = "empty"
+)
diff --git a/internal/audit/request.go b/internal/audit/request.go
new file mode 100644
index 0000000000..17666ab4b9
--- /dev/null
+++ b/internal/audit/request.go
@@ -0,0 +1,51 @@
+package audit
+
+import (
+ "context"
+
+ crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "go.uber.org/zap"
+)
+
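+// Request is the minimal request surface needed to extract the signer key for audit logging.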
+type Request interface {
+ GetVerificationHeader() *session.RequestVerificationHeader
+}
+
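+// Target identifies the object of an audited operation.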
+type Target interface {
+ String() string
+}
+
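+// LogRequest writes an audit record, deriving the subject from the request's body signature key.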
+func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
+ var key []byte
+ if req != nil {
+ key = req.GetVerificationHeader().GetBodySignature().GetKey()
+ }
+ LogRequestWithKey(ctx, log, operation, key, target, status)
+}
+
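+// LogRequestWithKey writes an audit record, deriving the subject from the given public key bytes.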
+func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
+ object, subject := NotDefined, NotDefined
+
+ publicKey := crypto.UnmarshalPublicKey(key)
+ if publicKey != nil {
+ subject = ((*keys.PublicKey)(publicKey)).StringCompressed()
+ }
+
+ if target != nil {
+ object = target.String()
+ }
+
+ log.Info(ctx, logs.AuditEventLogRecord,
+ zap.String("operation", operation),
+ zap.String("object", object),
+ zap.String("subject", subject),
+ zap.Bool("success", status))
+}
diff --git a/internal/audit/target.go b/internal/audit/target.go
new file mode 100644
index 0000000000..2d6881e299
--- /dev/null
+++ b/internal/audit/target.go
@@ -0,0 +1,108 @@
+package audit
+
+import (
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
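+// ModelType is an SDK model that can be decoded from its API representation and rendered as a string.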
+type ModelType[T any] interface {
+ ReadFromV2(m T) error
+ String() string
+}
+
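+// TargetFromRef renders a single API reference as an audit target, falling back to NotDefined or InvalidValue.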
+func TargetFromRef[T any](ref *T, model ModelType[T]) Target {
+ if ref == nil {
+ return stringTarget{s: NotDefined}
+ }
+ if err := model.ReadFromV2(*ref); err != nil {
+ return stringTarget{s: InvalidValue}
+ }
+ return stringTarget{s: model.String()}
+}
+
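+// TargetFromRefs renders a list of references as one semicolon-separated audit target.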
+func TargetFromRefs[T any](refs []*T, model ModelType[T]) Target {
+ if len(refs) == 0 {
+ return stringTarget{s: NotDefined}
+ }
+ sb := &strings.Builder{}
+ for idx, ref := range refs {
+ if idx > 0 {
+ sb.WriteString(";")
+ }
+ if ref == nil {
+ sb.WriteString(Empty)
+ continue
+ }
+ if err := model.ReadFromV2(*ref); err != nil {
+ sb.WriteString(InvalidValue)
+ } else {
+ sb.WriteString(model.String())
+ }
+ }
+ return stringTarget{s: sb.String()}
+}
+
+type stringTarget struct {
+ s string
+}
+
+func (t stringTarget) String() string {
+ return t.s
+}
+
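+// TargetFromString wraps a plain string as an audit target, substituting Empty for an empty string.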
+func TargetFromString(s string) Target {
+ if len(s) == 0 {
+ s = Empty
+ }
+ return stringTarget{s: s}
+}
+
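+// TargetFromChainID renders a policy chain target as "type:name:chainID" with Empty placeholders for absent parts.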
+func TargetFromChainID(chainTargetType, chainTargetName string, chainID []byte) Target {
+ if len(chainTargetType) == 0 && len(chainTargetName) == 0 && len(chainID) == 0 {
+ return stringTarget{s: NotDefined}
+ }
+ t, n, c := Empty, Empty, Empty
+ if len(chainTargetType) > 0 {
+ t = chainTargetType
+ }
+ if len(chainTargetName) > 0 {
+ n = chainTargetName
+ }
+ if len(chainID) > 0 {
+ c = string(chainID)
+ }
+ return stringTarget{s: t + ":" + n + ":" + c}
+}
+
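+// TargetFromContainerIDObjectID renders a "containerID/objectID" audit target.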
+func TargetFromContainerIDObjectID(containerID *refs.ContainerID, objectID *refs.ObjectID) Target {
+ if containerID == nil && objectID == nil {
+ return stringTarget{s: NotDefined}
+ }
+ c, o := Empty, Empty
+ if containerID != nil {
+ var cnr cid.ID
+ if err := cnr.ReadFromV2(*containerID); err != nil {
+ c = InvalidValue
+ } else {
+ c = cnr.EncodeToString()
+ }
+ }
+ if objectID != nil {
+ var obj oid.ID
+ if err := obj.ReadFromV2(*objectID); err != nil {
+ o = InvalidValue
+ } else {
+ o = obj.EncodeToString()
+ }
+ }
+ return stringTarget{s: c + "/" + o}
+}
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
new file mode 100644
index 0000000000..626372f435
--- /dev/null
+++ b/internal/logs/logs.go
@@ -0,0 +1,521 @@
+package logs
+
+// Common service logs.
+const (
+ ServingRequest = "serving request..."
+ OperationFinishedSuccessfully = "operation finished successfully"
+ OperationFinishedWithError = "operation finished with error"
+
+ TryingToExecuteInContainer = "trying to execute in container..."
+ CouldNotGetCurrentEpochNumber = "could not get current epoch number"
+ ProcessEpoch = "process epoch"
+ ProcessingNode = "processing node..."
+ NoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration"
+ InterruptPlacementIterationByContext = "interrupt placement iteration by context"
+
+ Notification = "notification"
+)
+
+const (
+ InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
+ InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
+ InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
+ InnerringCantGetInnerRingIndex = "can't get inner ring index"
+ InnerringCantGetInnerRingSize = "can't get inner ring size"
+ InnerringCantGetAlphabetIndex = "can't get alphabet index"
+ InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range"
+ InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list"
+ InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract"
+ InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number"
+ InnerringNotarySupport = "notary support"
+ InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled"
+ InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled"
+ InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
+ InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global"
+ InnerringCantVoteForPreparedValidators = "can't vote for prepared validators"
+ InnerringNewBlock = "new block"
+ InnerringCantUpdatePersistentState = "can't update persistent state"
+ InnerringCloserError = "closer error"
+ InnerringReadConfigFromBlockchain = "read config from blockchain"
+ PolicerCouldNotGetContainer = "could not get container"
+ PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
+ PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
+ PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object"
+ PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected"
+ PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance"
+ PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK"
+ PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected"
+ PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy"
+ PolicerRoutineStopped = "routine stopped"
+ PolicerFailureAtObjectSelectForReplication = "failure at object select for replication"
+ PolicerPoolSubmission = "pool submission"
+ PolicerUnableToProcessObj = "unable to process object"
+ ReplicatorFinishWork = "finish work"
+ ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage"
+ ReplicatorCouldNotReplicateObject = "could not replicate object"
+ ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
+ TreeRedirectingTreeServiceQuery = "redirecting tree service query"
+ TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
+ TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
+ TreeSynchronizeTree = "synchronize tree"
+ TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes"
+ TreeFailedToRunTreeSynchronizationForSpecificNode = "failed to run tree synchronization for specific node"
+ TreeFailedToParseAddressForTreeSynchronization = "failed to parse address for tree synchronization"
+ TreeFailedToConnectForTreeSynchronization = "failed to connect for tree synchronization"
+ TreeSyncingTrees = "syncing trees..."
+ TreeCouldNotFetchContainers = "could not fetch containers"
+ TreeTreesHaveBeenSynchronized = "trees have been synchronized"
+ TreeSyncingContainerTrees = "syncing container trees..."
+ TreeCouldNotSyncTrees = "could not sync trees"
+ TreeContainerTreesHaveBeenSynced = "container trees have been synced"
+ TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization"
+ TreeRemovingRedundantTrees = "removing redundant trees..."
+ TreeCouldNotCheckIfContainerExisted = "could not check if the container ever existed"
+ TreeCouldNotRemoveRedundantTree = "could not remove redundant tree"
+ TreeCouldNotCalculateContainerNodes = "could not calculate container nodes"
+ TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation"
+ TreeDoNotSendUpdateToTheNode = "do not send update to the node"
+ TreeFailedToSentUpdateToTheNode = "failed to send update to the node"
+ TreeErrorDuringReplication = "error during replication"
+ PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage"
+ PersistentCouldNotDeleteSToken = "could not delete token"
+ PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens"
+ TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone from the source"
+ DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY"
+ DeleteAssemblingChain = "assembling chain..."
+ DeleteCollectingChildren = "collecting children..."
+ DeleteSupplementBySplitID = "supplement by split ID"
+ DeleteFormingTombstoneStructure = "forming tombstone structure..."
+ DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..."
+ DeleteFormingSplitInfo = "forming split info..."
+ DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..."
+ DeleteMembersSuccessfullyCollected = "members successfully collected"
+ DeleteECObjectReceived = "erasure-coded object received, form tombstone"
+ GetRemoteCallFailed = "remote call failed"
+ GetCanNotAssembleTheObject = "can not assemble the object"
+ GetTryingToAssembleTheObject = "trying to assemble the object..."
+ GetTryingToAssembleTheECObject = "trying to assemble the ec object..."
+ GetAssemblingSplittedObject = "assembling split object..."
+ GetAssemblingECObject = "assembling erasure-coded object..."
+ GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existing ones"
+ GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
+ GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
+ GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
+ GetAssemblingSplittedObjectCompleted = "assembling split object completed"
+ GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
+ GetFailedToAssembleSplittedObject = "failed to assemble split object"
+ GetFailedToAssembleECObject = "failed to assemble erasure-coded object"
+ GetCouldNotGenerateContainerTraverser = "could not generate container traverser"
+ GetCouldNotConstructRemoteNodeClient = "could not construct remote node client"
+ GetCouldNotWriteHeader = "could not write header"
+ GetCouldNotWritePayloadChunk = "could not write payload chunk"
+ GetLocalGetFailed = "local get failed"
+ GetReturnResultDirectly = "return result directly"
+ GetCompletingTheOperation = "completing the operation"
+ GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed"
+ GetRequestedObjectIsVirtual = "requested object is virtual"
+ GetRequestedObjectIsEC = "requested object is erasure-coded"
+ GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds"
+ GetUnexpectedECObject = "failed to get EC object from node: expected EC info, but got full object"
+ PutAdditionalContainerBroadcastFailure = "additional container broadcast failure"
+ SearchReturnResultDirectly = "return result directly"
+ SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client"
+ SearchRemoteOperationFailed = "remote operation failed"
+ SearchCouldNotGenerateContainerTraverser = "could not generate container traverser"
+ SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
+ SearchLocalOperationFailed = "local operation failed"
+ UtilObjectServiceError = "object service error"
+ V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
+ V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
+ ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
+ ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch"
+ ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch"
+ ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node"
+ ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established"
+ ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node"
+ ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC"
+ ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
+ ClientNotaryDepositInvoke = "notary deposit invoke"
+ ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked"
+ ClientNotaryRequestInvoked = "notary request invoked"
+ ClientNotaryDepositTransactionWasSuccessfullyPersisted = "notary deposit transaction was successfully persisted"
+ ClientNeoClientInvoke = "neo client invoke"
+ ClientNativeGasTransferInvoke = "native gas transfer invoke"
+ ClientBatchGasTransferInvoke = "batch gas transfer invoke"
+ ClientCantGetBlockchainHeight = "can't get blockchain height"
+ ClientCantGetBlockchainHeight243 = "can't get blockchain height"
+ EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
+ EventStopEventListenerByError = "stop event listener by error"
+ EventStopEventListenerByContext = "stop event listener by context"
+ EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
+ EventNilNotificationEventWasCaught = "nil notification event was caught"
+ EventStopEventListenerByNotaryChannel = "stop event listener by notary channel"
+ EventNilNotaryEventWasCaught = "nil notary event was caught"
+ EventStopEventListenerByBlockChannel = "stop event listener by block channel"
+ EventNilBlockWasCaught = "nil block was caught"
+ EventListenerWorkerPoolDrained = "listener worker pool drained"
+ EventEventParserNotSet = "event parser not set"
+ EventCouldNotParseNotificationEvent = "could not parse notification event"
+ EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered"
+ EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event"
+ EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event"
+ EventNotaryParserNotSet = "notary parser not set"
+ EventCouldNotParseNotaryEvent = "could not parse notary event"
+ EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
+ EventRegisteredNewEventParser = "registered new event parser"
+ EventRegisteredNewEventHandler = "registered new event handler"
+ EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
+ StorageOperation = "local object storage operation"
+ BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
+ BlobovniczaOpeningBoltDB = "opening BoltDB"
+ BlobovniczaInitializing = "initializing..."
+ BlobovniczaAlreadyInitialized = "already initialized"
+ BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range"
+ BlobovniczaClosingBoltDB = "closing BoltDB"
+ BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket"
+ BlobstorOpening = "opening..."
+ BlobstorInitializing = "initializing..."
+ BlobstorClosing = "closing..."
+ BlobstorCouldntCloseStorage = "couldn't close storage"
+ BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking"
+ BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration"
+ EngineShardHasBeenRemoved = "shard has been removed"
+ EngineCouldNotCloseRemovedShard = "could not close removed shard"
+ EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping"
+ EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard"
+ EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping"
+ EngineCouldNotCloseShard = "could not close shard"
+ EngineCouldNotReloadAShard = "could not reload a shard"
+ EngineAddedNewShard = "added new shard"
+ EngineCouldNotPutObjectToShard = "could not put object to shard"
+ EngineCouldNotCheckObjectExistence = "could not check object existence when put object to shard"
+ EngineErrorDuringSearchingForObjectChildren = "error during searching for object children"
+ EngineCouldNotInhumeObjectInShard = "could not inhume object in shard"
+ EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies"
+ EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine"
+ EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies"
+ EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check"
+ EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks"
+ EngineInterruptGettingLockers = "can't get object's lockers"
+ EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks"
+ EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones"
+ EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only"
+ EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode"
+ EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold"
+ EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold"
+ EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request"
+ EngineStartedShardsEvacuation = "started shards evacuation"
+ EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
+ EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
+ EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
+ MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
+ MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
+ MetabaseCheckingMetabaseVersion = "checking metabase version"
+ ShardCantSelectAllObjects = "can't select all objects"
+ ShardSettingShardMode = "setting shard mode"
+ ShardShardModeSetSuccessfully = "shard mode set successfully"
+ ShardFetchingObjectWithoutMeta = "fetching object without meta"
+ ShardObjectIsMissingInWritecache = "object is missing in write-cache"
+ ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache"
+ ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor"
+ ShardMetaObjectCounterRead = "meta: object counter read"
+ ShardMetaCantReadContainerList = "meta: can't read container list"
+ ShardMetaCantReadContainerSize = "meta: can't read container size"
+ ShardMetaInfoPresentButObjectNotFound = "meta info was present, but the object is missing"
+ ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
+ ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
+ ShardCouldNotUnmarshalObject = "could not unmarshal object"
+ ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
+ ShardCouldNotCloseShardComponent = "could not close shard component"
+ ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
+ ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
+ ShardStopEventListenerByClosedEventChannel = "stop event listener by closed `event` channel"
+ ShardStopEventListenerByClosedStopChannel = "stop event listener by closed `stop` channel"
+ ShardEventProcessingInProgress = "event processing is in progress, skip the received"
+ ShardStopEventListenerByContext = "stop event listener by context"
+ ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool"
+ ShardGCIsStopped = "GC is stopped"
+ ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..."
+ ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed"
+ ShardCouldNotDeleteTheObjects = "could not delete the objects"
+ ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed"
+ ShardCouldNotInhumeTheObjects = "could not inhume the objects"
+ ShardStartedExpiredTombstonesHandling = "started expired tombstones handling"
+ ShardIteratingTombstones = "iterating tombstones"
+ ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones"
+ ShardIteratorOverGraveyardFailed = "iterator over graveyard failed"
+ ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch"
+ ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling"
+ ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed"
+ ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage"
+ ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records"
+ ShardFailureToUnlockObjects = "failure to unlock objects"
+ ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
+ ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
+ ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
+ ShardCouldNotFindObject = "could not find object"
+ WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
+ WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
+ BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
+ BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
+ BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level"
+ BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza"
+ BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed"
+ BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza"
+ BlobovniczatreeInitializingBlobovniczas = "initializing blobovniczas"
+ BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..."
+ BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..."
+ AlphabetTick = "tick"
+ AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained"
+ AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event"
+ AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event"
+ AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method"
+ AlphabetStorageNodeEmissionIsOff = "storage node emission is off"
+ AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes"
+ AlphabetGasEmission = "gas emission"
+ AlphabetCantParseNodePublicKey = "can't parse node public key"
+ AlphabetCantTransferGas = "can't transfer gas"
+ AlphabetCantTransferGasToWallet = "can't transfer gas to wallet"
+ AlphabetAlphabetWorkerPool = "alphabet worker pool"
+ BalanceBalanceWorkerPoolDrained = "balance worker pool drained"
+ BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock"
+ BalanceCantSendLockAssetTx = "can't send lock asset tx"
+ BalanceBalanceWorkerPool = "balance worker pool"
+ ContainerContainerWorkerPool = "container worker pool"
+ ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained"
+ ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put"
+ ContainerPutContainerCheckFailed = "put container check failed"
+ ContainerCouldNotApprovePutContainer = "could not approve put container"
+ ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
+ ContainerDeleteContainerCheckFailed = "delete container check failed"
+ ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
+ FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
+ FrostFSCantRelaySetConfigEvent = "can't relay set config event"
+ FrostFSFrostfsWorkerPool = "frostfs worker pool"
+ FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained"
+ FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit"
+ FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract"
+ FrostFSDoubleMintEmissionDeclined = "double mint emission declined"
+ FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node"
+ FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached"
+ FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver"
+ FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw"
+ FrostFSCantCreateLockAccount = "can't create lock account"
+ FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw"
+ FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque"
+ FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract"
+ GovernanceNewEvent = "new event"
+ GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained"
+ GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync"
+ GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net"
+ GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain"
+ GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain"
+ GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed"
+ GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update"
+ GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee"
+ GovernanceFinishedAlphabetListUpdate = "finished alphabet list update"
+ GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain"
+ GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys"
+ GovernanceUpdateOfTheInnerRingList = "update of the inner ring list"
+ GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys"
+ GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain"
+ GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract"
+ NetmapNetmapWorkerPool = "netmap worker pool"
+ NetmapTick = "tick"
+ NetmapNetmapWorkerPoolDrained = "netmap worker pool drained"
+ NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled"
+ NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick"
+ NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node"
+ NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap"
+ NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState"
+ NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache"
+ NetmapCantGetEpochDuration = "can't get epoch duration"
+ NetmapCantGetTransactionHeight = "can't get transaction height"
+ NetmapCantResetEpochTimer = "can't reset epoch timer"
+ NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
+ NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
+ NetmapNextEpoch = "next epoch"
+ NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
+ NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification"
+ NetmapNonhaltNotaryTransaction = "non-halt notary transaction"
+ NetmapCantParseNetworkMapCandidate = "can't parse network map candidate"
+ NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate"
+ NetmapApprovingNetworkMapCandidate = "approving network map candidate"
+ NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer"
+ NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification"
+ NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state"
+ NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer"
+ FrostFSIRInternalError = "internal error"
+ FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server"
+ FrostFSIRApplicationStopped = "application stopped"
+ FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint"
+ FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint"
+ FrostFSIRReloadExtraWallets = "reload extra wallets"
+ FrostFSNodeStartListeningEndpoint = "start listening endpoint"
+ FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file"
+ FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint"
+ FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint"
+ FrostFSNodeStoppingGRPCServer = "stopping gRPC server..."
+ FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop"
+ FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully"
+ FrostFSNodeGRPCServerError = "gRPC server error"
+ FrostFSNodeGRPCReconnecting = "reconnecting gRPC server..."
+ FrostFSNodeGRPCReconnectedSuccessfully = "gRPC server reconnected successfully"
+ FrostFSNodeGRPCServerConfigNotFound = "gRPC server config not found"
+ FrostFSNodeGRPCReconnectFailed = "failed to reconnect gRPC server"
+ FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop"
+ FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance"
+ FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance"
+ FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine"
+ FrostFSNodeShardAttachedToEngine = "shard attached to engine"
+ FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..."
+ FrostFSNodeAccessPolicyEngineClosingFailure = "ape closing failure"
+ FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure"
+ FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed = "persistent rule storage db path is not set: in-memory will be used"
+ FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully"
+ FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state"
+ FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state"
+ FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..."
+ FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete"
+ FrostFSNodeInternalApplicationError = "internal application error"
+ FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete"
+ FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..."
+ FrostFSNodeSIGHUPSkip = "node is not ready for reconfiguration, skipped SIGHUP"
+ FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
+ FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
+ FrostFSNodeConfigurationReading = "configuration reading"
+ FrostFSNodeTracingConfigationUpdated = "tracing configuration updated"
+ FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
+ FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
+ FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying"
+ FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully"
+ FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
+ FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
+ FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
+ FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
+ FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
+ FrostFSNodeFailedInitTracing = "failed init tracing"
+ FrostFSNodeFailedShutdownTracing = "failed shutdown tracing"
+ FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client"
+ FrostFSNodeClosingMorphComponents = "closing morph components..."
+ FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global"
+ FrostFSNodeNotarySupport = "notary support"
+ FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network"
+ FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
+ FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
+ FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain"
+ FrostFSNodeNewBlock = "new block"
+ FrostFSNodeCantUpdatePersistentState = "can't update persistent state"
+ FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx"
+ FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch"
+ FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit"
+ FrostFSNodeInitialNetworkState = "initial network state"
+ FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization"
+ FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service"
+ FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container"
+ FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed"
+ FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)"
+ FrostFSNodePolicerIsDisabled = "policer is disabled"
+ CommonApplicationStarted = "application started"
+ ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started"
+ ShardGCCollectingExpiredObjectsCompleted = "collecting expired objects completed"
+ ShardGCCollectingExpiredLocksStarted = "collecting expired locks started"
+ ShardGCCollectingExpiredLocksCompleted = "collecting expired locks completed"
+ ShardGCRemoveGarbageStarted = "garbage remove started"
+ ShardGCRemoveGarbageCompleted = "garbage remove completed"
+ EngineShardsEvacuationFailedToCount = "failed to get total objects count to evacuate"
+ EngineShardsEvacuationFailedToListObjects = "failed to list objects to evacuate"
+ EngineShardsEvacuationFailedToReadObject = "failed to read object to evacuate"
+ EngineShardsEvacuationFailedToMoveObject = "failed to evacuate object to other node"
+ ShardGCFailedToGetExpiredWithLinked = "failed to get expired objects with linked"
+ FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap = "the node is under maintenance, skip initial bootstrap"
+ EngineCouldNotChangeShardModeToDisabled = "could not change shard mode to disabled"
+ RPConnectionLost = "RPC connection lost, attempting reconnect"
+ RPCNodeSwitchFailure = "can't switch RPC node"
+ FSTreeCantUnmarshalObject = "can't unmarshal an object"
+ FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
+ FSTreeCantUpdateID = "can't update object storage ID"
+ PutSingleRedirectFailure = "failed to redirect PutSingle request"
+ StorageIDRetrievalFailure = "can't get storage ID from metabase"
+ ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
+ CandidateStatusPriority = "candidate status is different from the netmap status, the former takes priority"
+ TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch"
+ RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
+ RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
+ AttemtToCloseAlreadyClosedBlobovnicza = "attempt to close an already closed blobovnicza"
+ FailedToGetContainerCounters = "failed to get container counters values"
+ FailedToRebuildBlobstore = "failed to rebuild blobstore"
+ BlobstoreRebuildStarted = "blobstore rebuild started"
+ BlobstoreRebuildCompletedSuccessfully = "blobstore rebuild completed successfully"
+ BlobstoreRebuildStopped = "blobstore rebuild stopped"
+ BlobstorRebuildFailedToRebuildStorages = "failed to rebuild storages"
+ BlobstorRebuildRebuildStoragesCompleted = "storages rebuild completed"
+ BlobovniczaTreeCollectingDBToRebuild = "collecting blobovniczas to rebuild..."
+ BlobovniczaTreeCollectingDBToRebuildFailed = "collecting blobovniczas to rebuild failed"
+ BlobovniczaTreeCollectingDBToRebuildSuccess = "collecting blobovniczas to rebuild completed successfully"
+ BlobovniczaTreeRebuildingBlobovnicza = "rebuilding blobovnicza..."
+ BlobovniczaTreeRebuildingBlobovniczaFailed = "rebuilding blobovnicza failed"
+ BlobovniczaTreeRebuildingBlobovniczaSuccess = "rebuilding blobovnicza completed successfully"
+ BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza = "could not put move info to source blobovnicza"
+ BlobovniczatreeCouldNotUpdateStorageID = "could not update storage ID"
+ BlobovniczatreeCouldNotDropMoveInfo = "could not drop move info from source blobovnicza"
+ BlobovniczatreeCouldNotDeleteFromSource = "could not delete object from source blobovnicza"
+ BlobovniczaTreeCompletingPreviousRebuild = "completing previous rebuild if failed..."
+ BlobovniczaTreeCompletedPreviousRebuildSuccess = "previous rebuild completed successfully"
+ BlobovniczaTreeCompletedPreviousRebuildFailed = "failed to complete previous rebuild"
+ BlobovniczatreeCouldNotCheckExistenceInTargetDB = "could not check object existence in target blobovnicza"
+ BlobovniczatreeCouldNotPutObjectToTargetDB = "could not put object to target blobovnicza"
+ BlobovniczaSavingCountersToMeta = "saving counters to blobovnicza's meta..."
+ BlobovniczaSavingCountersToMetaSuccess = "saving counters to blobovnicza's meta completed successfully"
+ BlobovniczaSavingCountersToMetaFailed = "saving counters to blobovnicza's meta failed"
+ ObjectRemovalFailureExistsInWritecache = "can't remove object: object must be flushed from writecache"
+ FailedToReportStatusToSystemd = "failed to report status to systemd"
+ ShardGCCollectingExpiredMetricsStarted = "collecting expired metrics started"
+ ShardGCCollectingExpiredMetricsCompleted = "collecting expired metrics completed"
+ ShardGCFailedToCollectZeroSizeContainers = "failed to collect zero-size containers"
+ ShardGCFailedToCollectZeroCountContainers = "failed to collect zero-count containers"
+ EngineFailedToCheckContainerAvailability = "failed to check container availability"
+ EngineFailedToGetContainerSize = "failed to get container size"
+ EngineFailedToDeleteContainerSize = "failed to delete container size"
+ EngineInterruptProcessingZeroSizeContainers = "interrupt processing zero-size containers"
+ EngineInterruptProcessingZeroCountContainers = "interrupt processing zero-count containers"
+ EngineFailedToGetContainerCounters = "failed to get container counters"
+ GetSvcV2FailedToParseNodeEndpoints = "failed to parse node endpoints"
+ GetSvcV2FailedToParseNodeExternalAddresses = "failed to parse node external addresses"
+ GetSvcV2FailedToGetRangeHashFromNode = "failed to get range hash from node"
+ GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes = "failed to get range hash from all of container nodes"
+ FailedToUpdateShardID = "failed to update shard id"
+ EngineShardsEvacuationFailedToMoveTree = "failed to evacuate tree to other node"
+ EngineShardsEvacuationTreeEvacuatedLocal = "tree evacuated to local node"
+ EngineShardsEvacuationTreeEvacuatedRemote = "tree evacuated to other node"
+ EngineRefillFailedToGetObjectsCount = "failed to get blobstor objects count, no resync percent estimation is available"
+ ECFailedToSendToContainerNode = "failed to send EC object to container node"
+ ECFailedToSaveECPart = "failed to save EC part"
+ PolicerNodeIsNotECObjectNode = "current node is not EC object node"
+ PolicerFailedToGetLocalECChunks = "failed to get local EC chunks"
+ PolicerMissingECChunk = "failed to find EC chunk on any of the nodes"
+ PolicerFailedToDecodeECChunkID = "failed to decode EC chunk ID"
+ PolicerDifferentObjectIDForTheSameECChunk = "different object IDs for the same EC chunk"
+ ReplicatorCouldNotGetObjectFromRemoteStorage = "could not get object from remote storage"
+ ReplicatorCouldNotPutObjectToLocalStorage = "could not put object to local storage"
+ PolicerCouldNotGetObjectFromNodeMoving = "could not get EC object from the node, moving current chunk to the node"
+ PolicerCouldNotRestoreObjectNotEnoughChunks = "could not restore EC object: not enough chunks"
+ PolicerFailedToRestoreObject = "failed to restore EC object"
+ PolicerCouldNotGetChunk = "could not get EC chunk"
+ PolicerCouldNotGetChunks = "could not get EC chunks"
+ AuditEventLogRecord = "audit event log record"
+ StartedWritecacheSealAsync = "started writecache seal async"
+ WritecacheSealCompletedAsync = "writecache seal completed successfully"
+ FailedToSealWritecacheAsync = "failed to seal writecache async"
+ WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
+ BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
+ WritecacheCantGetObject = "can't get an object from fstree"
+ FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
+ FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
+ NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
+ FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
+ FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
+ WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
+ FailedToUpdateNetmapCandidates = "update netmap candidates failed"
+ UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used"
+)
diff --git a/internal/metrics/application.go b/internal/metrics/application.go
new file mode 100644
index 0000000000..53acf9b7f7
--- /dev/null
+++ b/internal/metrics/application.go
@@ -0,0 +1,23 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type ApplicationInfo struct {
+ versionValue *prometheus.GaugeVec
+}
+
+func NewApplicationInfo(version string) *ApplicationInfo {
+ appInfo := &ApplicationInfo{
+ versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Name: "app_info",
+ Help: "General information about the application.",
+ }, []string{"version"}),
+ }
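+ // With instantiates the labeled series so the version is exported right away (gauge value defaults to 0).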
+ appInfo.versionValue.With(prometheus.Labels{"version": version})
+ return appInfo
+}
diff --git a/internal/metrics/blobovnicza.go b/internal/metrics/blobovnicza.go
new file mode 100644
index 0000000000..948272c881
--- /dev/null
+++ b/internal/metrics/blobovnicza.go
@@ -0,0 +1,214 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
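+// BlobobvnizcaMetrics collects metrics of the Blobovnicza tree storage component.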
+type BlobobvnizcaMetrics interface {
+ SetBlobobvnizcaTreeMode(shardID, path string, mode mode.ComponentMode)
+ CloseBlobobvnizcaTree(shardID, path string)
+ BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool)
+ AddBlobobvnizcaTreePut(shardID, path string, size int)
+ AddBlobobvnizcaTreeGet(shardID, path string, size int)
+
+ AddOpenBlobovniczaSize(shardID, path string, size uint64)
+ SubOpenBlobovniczaSize(shardID, path string, size uint64)
+
+ AddOpenBlobovniczaItems(shardID, path string, items uint64)
+ SubOpenBlobovniczaItems(shardID, path string, items uint64)
+
+ IncOpenBlobovniczaCount(shardID, path string)
+ DecOpenBlobovniczaCount(shardID, path string)
+
+ BlobovniczaTreeRebuildStatus(shardID, path, status string)
+ BlobovniczaTreeRebuildPercent(shardID, path string, value uint32)
+ BlobovniczaTreeObjectMoved(shardID, path string, d time.Duration)
+}
+
+type blobovnicza struct {
+ treeMode *shardIDPathModeValue
+ treeReqDuration *prometheus.HistogramVec
+ treePut *prometheus.CounterVec
+ treeGet *prometheus.CounterVec
+ treeOpenSize *prometheus.GaugeVec
+ treeOpenItems *prometheus.GaugeVec
+ treeOpenCounter *prometheus.GaugeVec
+ treeObjectMoveDuration *prometheus.HistogramVec
+ treeRebuildStatus *shardIDPathModeValue
+ treeRebuildPercent *prometheus.GaugeVec
+}
+
+func newBlobovnicza() *blobovnicza {
+ return &blobovnicza{
+ treeMode: newShardIDPathMode(blobovniczaTreeSubSystem, "mode", "Blobovnicza tree mode"),
+
+ treeReqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "request_duration_seconds",
+ Help: "Accumulated Blobovnicza tree request process duration",
+ }, []string{shardIDLabel, pathLabel, successLabel, methodLabel, withStorageIDLabel}),
+ treePut: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "put_bytes",
+ Help: "Accumulated payload size written to Blobovnicza tree",
+ }, []string{shardIDLabel, pathLabel}),
+ treeGet: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "get_bytes",
+ Help: "Accumulated payload size read from Blobovnicza tree",
+ }, []string{shardIDLabel, pathLabel}),
+ treeOpenSize: metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "open_blobovnicza_size_bytes",
+ Help: "Size of opened blobovniczas of Blobovnicza tree",
+ }, []string{shardIDLabel, pathLabel}),
+ treeOpenItems: metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "open_blobovnicza_items_total",
+ Help: "Count of items in opened blobovniczas of Blobovnicza tree",
+ }, []string{shardIDLabel, pathLabel}),
+ treeOpenCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "open_blobovnicza_count",
+ Help: "Count of opened blobovniczas of Blobovnicza tree",
+ }, []string{shardIDLabel, pathLabel}),
+ treeObjectMoveDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "object_move_duration_seconds",
+ Help: "Accumulated Blobovnicza tree object move duration",
+ }, []string{shardIDLabel, pathLabel}),
+ treeRebuildStatus: newShardIDPathMode(blobovniczaTreeSubSystem, "rebuild_status", "Blobovnicza tree rebuild status"),
+ treeRebuildPercent: metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: blobovniczaTreeSubSystem,
+ Name: "rebuild_complete_percent",
+ Help: "Percent of rebuild completeness",
+ }, []string{shardIDLabel, pathLabel}),
+ }
+}
+
+func (b *blobovnicza) SetBlobobvnizcaTreeMode(shardID, path string, mod mode.ComponentMode) {
+ b.treeMode.SetMode(shardID, path, mod.String())
+}
+
+func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) {
+ b.treeMode.SetMode(shardID, path, closedMode)
+ b.treeReqDuration.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+ b.treeGet.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+ b.treePut.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+ b.treeObjectMoveDuration.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+ b.treeRebuildPercent.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+ b.treeRebuildStatus.SetMode(shardID, path, undefinedStatus)
+}
+
+func (b *blobovnicza) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) {
+ b.treeReqDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ successLabel: strconv.FormatBool(success),
+ methodLabel: method,
+ withStorageIDLabel: withStorageID.String(),
+ }).Observe(d.Seconds())
+}
+
+func (b *blobovnicza) AddBlobobvnizcaTreePut(shardID, path string, size int) {
+ b.treePut.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Add(float64(size))
+}
+
+func (b *blobovnicza) AddBlobobvnizcaTreeGet(shardID, path string, size int) {
+ b.treeGet.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Add(float64(size))
+}
+
+func (b *blobovnicza) AddOpenBlobovniczaSize(shardID, path string, size uint64) {
+ b.treeOpenSize.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Add(float64(size))
+}
+
+func (b *blobovnicza) SubOpenBlobovniczaSize(shardID, path string, size uint64) {
+ b.treeOpenSize.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Sub(float64(size))
+}
+
+func (b *blobovnicza) IncOpenBlobovniczaCount(shardID, path string) {
+ b.treeOpenCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Inc()
+}
+
+func (b *blobovnicza) DecOpenBlobovniczaCount(shardID, path string) {
+ b.treeOpenCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Dec()
+}
+
+func (b *blobovnicza) AddOpenBlobovniczaItems(shardID, path string, items uint64) {
+ b.treeOpenItems.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Add(float64(items))
+}
+
+func (b *blobovnicza) SubOpenBlobovniczaItems(shardID, path string, items uint64) {
+ b.treeOpenItems.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Sub(float64(items))
+}
+
+func (b *blobovnicza) BlobovniczaTreeRebuildStatus(shardID, path, status string) {
+ b.treeRebuildStatus.SetMode(shardID, path, status)
+}
+
+func (b *blobovnicza) BlobovniczaTreeObjectMoved(shardID, path string, d time.Duration) {
+ b.treeObjectMoveDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Observe(d.Seconds())
+}
+
+func (b *blobovnicza) BlobovniczaTreeRebuildPercent(shardID, path string, value uint32) {
+ b.treeRebuildPercent.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Set(float64(value))
+}
diff --git a/internal/metrics/blobstore.go b/internal/metrics/blobstore.go
new file mode 100644
index 0000000000..d9bb3f0296
--- /dev/null
+++ b/internal/metrics/blobstore.go
@@ -0,0 +1,87 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
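+// BlobstoreMetrics defines metrics collected by the Blobstore component.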
+type BlobstoreMetrics interface {
+ SetMode(shardID string, readOnly bool)
+ Close(shardID string)
+
+ MethodDuration(shardID string, method string, d time.Duration, success bool, withStorageID NullBool)
+ AddPut(shardID string, size int)
+ AddGet(shardID string, size int)
+}
+
+type blobstoreMetrics struct {
+ mode *shardIDModeValue
+ reqDuration *prometheus.HistogramVec
+ put *prometheus.CounterVec
+ get *prometheus.CounterVec
+}
+
+func newBlobstoreMetrics() *blobstoreMetrics {
+ return &blobstoreMetrics{
+ mode: newShardIDMode(blobstoreSubSystem, "mode", "Blobstore mode value"),
+ reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: blobstoreSubSystem,
+ Name: "request_duration_seconds",
+ Help: "Accumulated Blobstore request process duration",
+ }, []string{shardIDLabel, successLabel, methodLabel, withStorageIDLabel}),
+ put: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: blobstoreSubSystem,
+ Name: "put_bytes",
+ Help: "Accumulated payload size written to Blobstore",
+ }, []string{shardIDLabel}),
+ get: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: blobstoreSubSystem,
+ Name: "get_bytes",
+ Help: "Accumulated payload size read from Blobstore",
+ }, []string{shardIDLabel}),
+ }
+}
+
+func (m *blobstoreMetrics) SetMode(shardID string, readOnly bool) {
+ m.mode.SetMode(shardID, modeFromBool(readOnly))
+}
+
+func (m *blobstoreMetrics) Close(shardID string) {
+ m.mode.SetMode(shardID, closedMode)
+ m.reqDuration.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ })
+ m.get.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ })
+ m.put.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ })
+}
+
+func (m *blobstoreMetrics) MethodDuration(shardID string, method string, d time.Duration, success bool, withStorageID NullBool) {
+ m.reqDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ successLabel: strconv.FormatBool(success),
+ methodLabel: method,
+ withStorageIDLabel: withStorageID.String(),
+ }).Observe(d.Seconds())
+}
+
+func (m *blobstoreMetrics) AddPut(shardID string, size int) {
+ m.put.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ }).Add(float64(size))
+}
+
+func (m *blobstoreMetrics) AddGet(shardID string, size int) {
+ m.get.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ }).Add(float64(size))
+}
diff --git a/internal/metrics/cache.go b/internal/metrics/cache.go
new file mode 100644
index 0000000000..8181586e25
--- /dev/null
+++ b/internal/metrics/cache.go
@@ -0,0 +1,35 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var cacheRequests = metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: commonCacheSubsystem,
+ Name: "request_duration_seconds",
+ Help: "Accumulated common cache request process duration",
+}, []string{hitLabel, methodLabel, cacheLabel})
+
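+// CacheMetrics accumulates request metrics for a single named cache.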
+type CacheMetrics struct {
+ cache string
+}
+
+// NewCacheMetrics returns a new CacheMetrics instance for the specified cache.
+func NewCacheMetrics(cache string) *CacheMetrics {
+ return &CacheMetrics{
+ cache: cache,
+ }
+}
+
+func (m *CacheMetrics) AddMethodDuration(method string, d time.Duration, hit bool) {
+ cacheRequests.With(prometheus.Labels{
+ hitLabel: strconv.FormatBool(hit),
+ methodLabel: method,
+ cacheLabel: m.cache,
+ }).Observe(d.Seconds())
+}
diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go
new file mode 100644
index 0000000000..9123541ff5
--- /dev/null
+++ b/internal/metrics/consts.go
@@ -0,0 +1,56 @@
+package metrics
+
+const (
+ namespace = "frostfs_node"
+ innerRingNamespace = "frostfs_ir"
+
+ fstreeSubSystem = "fstree"
+ blobstoreSubSystem = "blobstore"
+ blobovniczaTreeSubSystem = "blobovnicza_tree"
+ metabaseSubSystem = "metabase"
+ piloramaSubSystem = "pilorama"
+ engineSubsystem = "engine"
+ gcSubsystem = "garbage_collector"
+ innerRingSubsystem = "ir"
+ morphSubsystem = "morph"
+ morphCacheSubsystem = "morphcache"
+ objectSubsystem = "object"
+ replicatorSubsystem = "replicator"
+ stateSubsystem = "state"
+ treeServiceSubsystem = "treeservice"
+ writeCacheSubsystem = "writecache"
+ grpcServerSubsystem = "grpc_server"
+ policerSubsystem = "policer"
+ commonCacheSubsystem = "common_cache"
+ multinetSubsystem = "multinet"
+ qosSubsystem = "qos"
+
+ successLabel = "success"
+ shardIDLabel = "shard_id"
+ modeLabel = "mode"
+ pathLabel = "path"
+ methodLabel = "method"
+ withStorageIDLabel = "with_storage_id"
+ statusLabel = "status"
+ objectTypeLabel = "object_type"
+ typeLabel = "type"
+ notificationTypeLabel = "notification_type"
+ invokeTypeLabel = "invoke_type"
+ contractLabel = "contract"
+ containerIDLabelKey = "cid"
+ storageLabel = "storage"
+ operationLabel = "operation"
+ endpointLabel = "endpoint"
+ hitLabel = "hit"
+ cacheLabel = "cache"
+ sourceIPLabel = "source_ip"
+ ioTagLabel = "io_tag"
+
+ readWriteMode = "READ_WRITE"
+ readOnlyMode = "READ_ONLY"
+ closedMode = "CLOSED"
+
+ failedToDeleteStatus = "failed_to_delete"
+ deletedStatus = "deleted"
+ undefinedStatus = "undefined"
+)
diff --git a/internal/metrics/engine.go b/internal/metrics/engine.go
new file mode 100644
index 0000000000..1d01c95ed5
--- /dev/null
+++ b/internal/metrics/engine.go
@@ -0,0 +1,223 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
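+// EngineMetrics defines metrics collected by the storage engine and its shards.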
+type EngineMetrics interface {
+ AddMethodDuration(method string, d time.Duration)
+ AddToContainerSize(cnrID string, size int64)
+ DeleteContainerSize(cnrID string)
+ DeleteContainerCount(cnrID string)
+ IncErrorCounter(shardID string)
+ ClearErrorCounter(shardID string)
+ DeleteShardMetrics(shardID string)
+ AddToObjectCounter(shardID, objectType string, delta int)
+ SetObjectCounter(shardID, objectType string, v uint64)
+ AddToPayloadCounter(shardID string, size int64)
+ SetMode(shardID string, mode mode.Mode)
+ SetContainerObjectCounter(shardID, contID, objectType string, v uint64)
+ IncContainerObjectCounter(shardID, contID, objectType string)
+ SubContainerObjectCounter(shardID, contID, objectType string, v uint64)
+ IncRefillObjectsCount(shardID, path string, size int, success bool)
+ SetRefillPercent(shardID, path string, percent uint32)
+ SetRefillStatus(shardID, path, status string)
+ SetEvacuationInProgress(shardID string, value bool)
+
+ WriteCache() WriteCacheMetrics
+ GC() GCMetrics
+}
+
+type engineMetrics struct {
+ methodDuration *prometheus.HistogramVec
+ objectCounter *prometheus.GaugeVec
+ containerSize *prometheus.GaugeVec
+ payloadSize *prometheus.GaugeVec
+ errorCounter *prometheus.GaugeVec
+ mode *shardIDModeValue
+ contObjCounter *prometheus.GaugeVec
+
+ refillStatus *shardIDPathModeValue
+ refillObjCounter *prometheus.GaugeVec
+ refillPayloadCounter *prometheus.GaugeVec
+ refillPercentCounter *prometheus.GaugeVec
+ evacuationInProgress *shardIDModeValue
+
+ gc *gcMetrics
+ writeCache *writeCacheMetrics
+}
+
+func newEngineMetrics() *engineMetrics {
+ return &engineMetrics{
+ containerSize: newEngineGaugeVector("container_size_bytes", "Accumulated size of all objects in a container", []string{containerIDLabelKey}),
+ payloadSize: newEngineGaugeVector("payload_size_bytes", "Accumulated size of all objects in a shard", []string{shardIDLabel}),
+ errorCounter: newEngineGaugeVector("errors_total", "Shard's error counter", []string{shardIDLabel}),
+ methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: engineSubsystem,
+ Name: "request_duration_seconds",
+ Help: "Duration of Engine requests",
+ }, []string{methodLabel}),
+ objectCounter: newEngineGaugeVector("objects_total",
+ "Objects counters per shards. DEPRECATED: Will be deleted in next releasese, use frostfs_node_engine_container_objects_total metric.",
+ []string{shardIDLabel, typeLabel}),
+ gc: newGCMetrics(),
+ writeCache: newWriteCacheMetrics(),
+ mode: newShardIDMode(engineSubsystem, "mode_info", "Shard mode"),
+ contObjCounter: newEngineGaugeVector("container_objects_total", "Count of objects for each container", []string{shardIDLabel, containerIDLabelKey, typeLabel}),
+ refillStatus: newShardIDPathMode(engineSubsystem, "resync_metabase_status", "Resync from blobstore to metabase status"),
+ refillObjCounter: newEngineGaugeVector("resync_metabase_objects_total", "Count of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
+ refillPayloadCounter: newEngineGaugeVector("resync_metabase_objects_size_bytes", "Size of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
+ refillPercentCounter: newEngineGaugeVector("resync_metabase_complete_percent", "Percent of completion of resync from blobstore to metabase", []string{shardIDLabel, pathLabel}),
+ evacuationInProgress: newShardIDMode(engineSubsystem, "evacuation_in_progress", "Shard evacuation in progress"),
+ }
+}
+
+func newEngineGaugeVector(name, help string, labels []string) *prometheus.GaugeVec {
+ return metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: engineSubsystem,
+ Name: name,
+ Help: help,
+ }, labels)
+}
+
+func (m *engineMetrics) AddMethodDuration(method string, d time.Duration) {
+ m.methodDuration.With(prometheus.Labels{
+ methodLabel: method,
+ }).Observe(d.Seconds())
+}
+
+func (m *engineMetrics) AddToContainerSize(cnrID string, size int64) {
+ m.containerSize.With(prometheus.Labels{containerIDLabelKey: cnrID}).Add(float64(size))
+}
+
+func (m *engineMetrics) DeleteContainerSize(cnrID string) {
+ m.containerSize.DeletePartialMatch(prometheus.Labels{containerIDLabelKey: cnrID})
+}
+
+func (m *engineMetrics) DeleteContainerCount(cnrID string) {
+ m.contObjCounter.DeletePartialMatch(prometheus.Labels{containerIDLabelKey: cnrID})
+}
+
+func (m *engineMetrics) AddToPayloadCounter(shardID string, size int64) {
+ m.payloadSize.With(prometheus.Labels{shardIDLabel: shardID}).Add(float64(size))
+}
+
+func (m *engineMetrics) IncErrorCounter(shardID string) {
+ m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Inc()
+}
+
+func (m *engineMetrics) ClearErrorCounter(shardID string) {
+ m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Set(0)
+}
+
+func (m *engineMetrics) DeleteShardMetrics(shardID string) {
+ m.errorCounter.Delete(prometheus.Labels{shardIDLabel: shardID})
+ m.payloadSize.Delete(prometheus.Labels{shardIDLabel: shardID})
+ m.objectCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.contObjCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.refillObjCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.refillPayloadCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.refillPercentCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.mode.Delete(shardID)
+ m.refillStatus.DeleteByShardID(shardID)
+ m.evacuationInProgress.Delete(shardID)
+}
+
+func (m *engineMetrics) AddToObjectCounter(shardID, objectType string, delta int) {
+ m.objectCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ typeLabel: objectType,
+ },
+ ).Add(float64(delta))
+}
+
+func (m *engineMetrics) SetObjectCounter(shardID, objectType string, v uint64) {
+ m.objectCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ typeLabel: objectType,
+ },
+ ).Set(float64(v))
+}
+
+func (m *engineMetrics) SetContainerObjectCounter(shardID, contID, objectType string, v uint64) {
+ m.contObjCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ containerIDLabelKey: contID,
+ typeLabel: objectType,
+ },
+ ).Set(float64(v))
+}
+
+func (m *engineMetrics) IncContainerObjectCounter(shardID, contID, objectType string) {
+ m.contObjCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ containerIDLabelKey: contID,
+ typeLabel: objectType,
+ },
+ ).Inc()
+}
+
+func (m *engineMetrics) SubContainerObjectCounter(shardID, contID, objectType string, v uint64) {
+ m.contObjCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ containerIDLabelKey: contID,
+ typeLabel: objectType,
+ },
+ ).Sub(float64(v))
+}
+
+func (m *engineMetrics) SetMode(shardID string, mode mode.Mode) {
+ m.mode.SetMode(shardID, mode.String())
+}
+
+func (m *engineMetrics) WriteCache() WriteCacheMetrics {
+ return m.writeCache
+}
+
+func (m *engineMetrics) GC() GCMetrics {
+ return m.gc
+}
+
+func (m *engineMetrics) IncRefillObjectsCount(shardID, path string, size int, success bool) {
+ m.refillObjCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ successLabel: strconv.FormatBool(success),
+ },
+ ).Inc()
+ m.refillPayloadCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ successLabel: strconv.FormatBool(success),
+ },
+ ).Add(float64(size))
+}
+
+func (m *engineMetrics) SetRefillPercent(shardID, path string, percent uint32) {
+ m.refillPercentCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Set(float64(percent))
+}
+
+func (m *engineMetrics) SetRefillStatus(shardID, path, status string) {
+ m.refillStatus.SetMode(shardID, path, status)
+}
+
+func (m *engineMetrics) SetEvacuationInProgress(shardID string, value bool) {
+ m.evacuationInProgress.SetMode(shardID, strconv.FormatBool(value))
+}
diff --git a/internal/metrics/fstree.go b/internal/metrics/fstree.go
new file mode 100644
index 0000000000..ecd4352bbf
--- /dev/null
+++ b/internal/metrics/fstree.go
@@ -0,0 +1,93 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
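+// FSTreeMetrics defines metrics collected by the FSTree substorage.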
+type FSTreeMetrics interface {
+ SetMode(shardID, path string, mode mode.ComponentMode)
+ Close(shardID, path string)
+
+ MethodDuration(shardID, path string, method string, d time.Duration, success bool)
+ AddGet(shardID, path string, size int)
+ AddPut(shardID, path string, size int)
+}
+
+type fstreeMetrics struct {
+ mode *shardIDPathModeValue
+ reqDuration *prometheus.HistogramVec
+ put *prometheus.CounterVec
+ get *prometheus.CounterVec
+}
+
+func newFSTreeMetrics() *fstreeMetrics {
+ return &fstreeMetrics{
+ mode: newShardIDPathMode(fstreeSubSystem, "mode", "FSTree mode value"),
+ reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: fstreeSubSystem,
+ Name: "request_duration_seconds",
+ Help: "Accumulated FSTree request process duration",
+ }, []string{shardIDLabel, successLabel, pathLabel, methodLabel}),
+ put: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: fstreeSubSystem,
+ Name: "put_bytes",
+ Help: "Accumulated payload size written to FSTree",
+ }, []string{shardIDLabel, pathLabel}),
+ get: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: fstreeSubSystem,
+ Name: "get_bytes",
+ Help: "Accumulated payload size read from FSTree",
+ }, []string{shardIDLabel, pathLabel}),
+ }
+}
+
+func (m *fstreeMetrics) SetMode(shardID, path string, mod mode.ComponentMode) {
+ m.mode.SetMode(shardID, path, mod.String())
+}
+
+func (m *fstreeMetrics) Close(shardID, path string) {
+ m.mode.SetMode(shardID, path, closedMode)
+ m.reqDuration.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+ m.get.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+ m.put.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+}
+
+func (m *fstreeMetrics) MethodDuration(shardID, path string, method string, d time.Duration, success bool) {
+ m.reqDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ successLabel: strconv.FormatBool(success),
+ methodLabel: method,
+ }).Observe(d.Seconds())
+}
+
+func (m *fstreeMetrics) AddGet(shardID, path string, size int) {
+ m.get.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Add(float64(size))
+}
+
+func (m *fstreeMetrics) AddPut(shardID, path string, size int) {
+ m.put.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ }).Add(float64(size))
+}
diff --git a/internal/metrics/gc.go b/internal/metrics/gc.go
new file mode 100644
index 0000000000..53bfef0e5f
--- /dev/null
+++ b/internal/metrics/gc.go
@@ -0,0 +1,88 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
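+// GCMetrics defines metrics collected by the shard garbage collector.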
+type GCMetrics interface {
+ AddRunDuration(shardID string, d time.Duration, success bool)
+ AddDeletedCount(shardID string, deleted, failed uint64)
+ AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string)
+ AddInhumedObjectCount(shardID string, count uint64, objectType string)
+}
+
+type gcMetrics struct {
+ runDuration *prometheus.CounterVec
+ deletedCounter *prometheus.CounterVec
+ expCollectDuration *prometheus.CounterVec
+ inhumedCounter *prometheus.CounterVec
+}
+
+func newGCMetrics() *gcMetrics {
+ return &gcMetrics{
+ runDuration: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: gcSubsystem,
+ Name: "delete_duration_seconds",
+ Help: "The total time of GC runs to delete objects from disk",
+ }, []string{shardIDLabel, successLabel}),
+ deletedCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: gcSubsystem,
+ Name: "deleted_objects_total",
+ Help: "Total count of objects GC deleted or failed to delete from disk",
+ }, []string{shardIDLabel, statusLabel}),
+ expCollectDuration: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: gcSubsystem,
+ Name: "marking_duration_seconds",
+ Help: "The total time of GC runs to mark expired objects as removed",
+ }, []string{shardIDLabel, successLabel, objectTypeLabel}),
+ inhumedCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: gcSubsystem,
+ Name: "marked_for_removal_objects_total",
+ Help: "Total count of expired objects GC marked to remove",
+ }, []string{shardIDLabel, objectTypeLabel}),
+ }
+}
+
+func (m *gcMetrics) AddRunDuration(shardID string, d time.Duration, success bool) {
+ m.runDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ successLabel: strconv.FormatBool(success),
+ }).Add(d.Seconds())
+}
+
+func (m *gcMetrics) AddDeletedCount(shardID string, deleted, failed uint64) {
+ m.deletedCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ statusLabel: deletedStatus,
+ }).Add(float64(deleted))
+ m.deletedCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ statusLabel: failedToDeleteStatus,
+ }).Add(float64(failed))
+}
+
+func (m *gcMetrics) AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string) {
+ m.expCollectDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ successLabel: strconv.FormatBool(success),
+ objectTypeLabel: objectType,
+ }).Add(d.Seconds())
+}
+
+func (m *gcMetrics) AddInhumedObjectCount(shardID string, count uint64, objectType string) {
+ m.inhumedCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ objectTypeLabel: objectType,
+ }).Add(float64(count))
+}
diff --git a/internal/metrics/grpc.go b/internal/metrics/grpc.go
new file mode 100644
index 0000000000..a83f53998a
--- /dev/null
+++ b/internal/metrics/grpc.go
@@ -0,0 +1,35 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
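+// GrpcServerMetrics reports the health of gRPC server endpoints.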
+type GrpcServerMetrics interface {
+ MarkHealthy(endpoint string)
+ MarkUnhealthy(endpoint string)
+}
+
+type grpcServerMetrics struct {
+ endpointHealth *prometheus.GaugeVec
+}
+
+func newGrpcServerMetrics() *grpcServerMetrics {
+ return &grpcServerMetrics{
+ endpointHealth: metrics.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: grpcServerSubsystem,
+ Name: "health",
+ Help: "GRPC Server Endpoint health",
+ }, []string{endpointLabel}),
+ }
+}
+
+func (m *grpcServerMetrics) MarkHealthy(endpoint string) {
+ m.endpointHealth.With(prometheus.Labels{endpointLabel: endpoint}).Set(float64(1))
+}
+
+func (m *grpcServerMetrics) MarkUnhealthy(endpoint string) {
+ m.endpointHealth.With(prometheus.Labels{endpointLabel: endpoint}).Set(float64(0))
+}
diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go
new file mode 100644
index 0000000000..d0cb8131ff
--- /dev/null
+++ b/internal/metrics/innerring.go
@@ -0,0 +1,86 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/misc"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// InnerRingServiceMetrics contains metrics collected by the inner ring.
+type InnerRingServiceMetrics struct {
+ epoch prometheus.Gauge
+ health prometheus.Gauge
+ eventDuration *prometheus.HistogramVec
+ morphCacheMetrics *morphCacheMetrics
+ logMetrics logger.LogMetrics
+ multinet *multinetMetrics
+ // nolint: unused
+ appInfo *ApplicationInfo
+}
+
+// NewInnerRingMetrics returns a new instance of metrics collectors for the inner ring.
+func NewInnerRingMetrics() *InnerRingServiceMetrics {
+ var (
+ epoch = metrics.NewGauge(prometheus.GaugeOpts{
+ Namespace: innerRingNamespace,
+ Subsystem: innerRingSubsystem,
+ Name: "epoch",
+ Help: "Current epoch as seen by inner-ring node.",
+ })
+ health = metrics.NewGauge(prometheus.GaugeOpts{
+ Namespace: innerRingNamespace,
+ Subsystem: innerRingSubsystem,
+ Name: "health",
+ Help: "Current inner-ring node state.",
+ })
+ eventDuration = metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: innerRingNamespace,
+ Subsystem: innerRingSubsystem,
+ Name: "event_duration_seconds",
+ Help: "Duration of processing of inner-ring events",
+ }, []string{typeLabel, successLabel})
+ )
+
+ return &InnerRingServiceMetrics{
+ epoch: epoch,
+ health: health,
+ eventDuration: eventDuration,
+ morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace),
+ appInfo: NewApplicationInfo(misc.Version),
+ logMetrics: logger.NewLogMetrics(innerRingNamespace),
+ multinet: newMultinetMetrics(innerRingNamespace),
+ }
+}
+
+// SetEpoch updates the epoch metric.
+func (m *InnerRingServiceMetrics) SetEpoch(epoch uint64) {
+ m.epoch.Set(float64(epoch))
+}
+
+// SetHealth updates the health metric.
+func (m *InnerRingServiceMetrics) SetHealth(s int32) {
+ m.health.Set(float64(s))
+}
+
+func (m *InnerRingServiceMetrics) AddEvent(d time.Duration, typ string, success bool) {
+ m.eventDuration.With(prometheus.Labels{
+ typeLabel: typ,
+ successLabel: strconv.FormatBool(success),
+ }).Observe(d.Seconds())
+}
+
+func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics {
+ return m.morphCacheMetrics
+}
+
+func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics {
+ return m.logMetrics
+}
+
+func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics {
+ return m.multinet
+}
diff --git a/internal/metrics/metabase.go b/internal/metrics/metabase.go
new file mode 100644
index 0000000000..640c7f7217
--- /dev/null
+++ b/internal/metrics/metabase.go
@@ -0,0 +1,54 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
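+// MetabaseMetrics defines metrics collected by the metabase.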
+type MetabaseMetrics interface {
+ SetMode(shardID, path string, mode string)
+ Close(shardID, path string)
+
+ MethodDuration(shardID, path string, method string, d time.Duration, success bool)
+}
+
+func newMetabaseMetrics() *metabaseMetrics {
+ return &metabaseMetrics{
+ mode: newShardIDPathMode(metabaseSubSystem, "mode", "Metabase mode"),
+ reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: metabaseSubSystem,
+ Name: "request_duration_seconds",
+ Help: "Accumulated Metabase request process duration",
+ }, []string{shardIDLabel, successLabel, pathLabel, methodLabel}),
+ }
+}
+
+type metabaseMetrics struct {
+ mode *shardIDPathModeValue
+ reqDuration *prometheus.HistogramVec
+}
+
+func (m *metabaseMetrics) SetMode(shardID, path string, mode string) {
+ m.mode.SetMode(shardID, path, mode)
+}
+
+func (m *metabaseMetrics) Close(shardID, path string) {
+ m.mode.SetMode(shardID, path, closedMode)
+ m.reqDuration.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+}
+
+func (m *metabaseMetrics) MethodDuration(shardID, path string, method string, d time.Duration, success bool) {
+ m.reqDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ successLabel: strconv.FormatBool(success),
+ methodLabel: method,
+ }).Observe(d.Seconds())
+}
diff --git a/internal/metrics/mode.go b/internal/metrics/mode.go
new file mode 100644
index 0000000000..a9ac47acdd
--- /dev/null
+++ b/internal/metrics/mode.go
@@ -0,0 +1,89 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type shardIDModeValue struct {
+ modeValue *prometheus.GaugeVec
+}
+
+func newShardIDMode(subsystem, name, help string) *shardIDModeValue {
+ return &shardIDModeValue{
+ modeValue: metrics.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ }, []string{shardIDLabel, modeLabel}),
+ }
+}
+
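+// SetMode records the shard mode, first dropping any previously recorded
+// mode so that at most one mode series exists per shard.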
+func (m *shardIDModeValue) SetMode(shardID, mode string) {
+ m.modeValue.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ })
+
+ m.modeValue.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ modeLabel: mode,
+ }).Set(1)
+}
+
+func (m *shardIDModeValue) Delete(shardID string) {
+ m.modeValue.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ })
+}
+
+type shardIDPathModeValue struct {
+ modeValue *prometheus.GaugeVec
+}
+
+func newShardIDPathMode(subsystem, name, help string) *shardIDPathModeValue {
+ return &shardIDPathModeValue{
+ modeValue: metrics.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ }, []string{shardIDLabel, pathLabel, modeLabel}),
+ }
+}
+
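+// SetMode records the component mode, first dropping any previously recorded
+// mode so that at most one mode series exists per shard and path.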
+func (m *shardIDPathModeValue) SetMode(shardID, path, mode string) {
+ m.modeValue.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+
+ m.modeValue.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ modeLabel: mode,
+ }).Set(1)
+}
+
+func (m *shardIDPathModeValue) Delete(shardID, path string) {
+ m.modeValue.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ pathLabel: path,
+ })
+}
+
+func (m *shardIDPathModeValue) DeleteByShardID(shardID string) {
+ m.modeValue.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ })
+}
+
+func modeFromBool(readOnly bool) string {
+ modeValue := readWriteMode
+ if readOnly {
+ modeValue = readOnlyMode
+ }
+ return modeValue
+}
diff --git a/internal/metrics/morph.go b/internal/metrics/morph.go
new file mode 100644
index 0000000000..02d7517bcb
--- /dev/null
+++ b/internal/metrics/morph.go
@@ -0,0 +1,72 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type morphClientMetrics struct {
+ switchCount prometheus.Counter
+ lastBlock prometheus.Gauge
+ notificationCount *prometheus.CounterVec
+ invokeDuration *prometheus.HistogramVec
+}
+
+func newMorphClientMetrics() *morphClientMetrics {
+ return &morphClientMetrics{
+ switchCount: metrics.NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: morphSubsystem,
+ Name: "switches_total",
+ Help: "Number of endpoint switches",
+ }),
+ lastBlock: metrics.NewGauge(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: morphSubsystem,
+ Name: "last_block",
+ Help: "Index of the last received block",
+ }),
+ notificationCount: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: morphSubsystem,
+ Name: "notifications_total",
+ Help: "Number of notifications received by notification type",
+ }, []string{notificationTypeLabel}),
+ invokeDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: morphSubsystem,
+ Name: "invoke_duration_seconds",
+ Help: "Cummulative duration of contract invocations",
+ }, []string{invokeTypeLabel, contractLabel, methodLabel, successLabel}),
+ }
+}
+
+func (m *morphClientMetrics) IncSwitchCount() {
+ m.switchCount.Inc()
+}
+
+func (m *morphClientMetrics) SetLastBlock(index uint32) {
+ m.lastBlock.Set(float64(index))
+}
+
+func (m *morphClientMetrics) IncNotificationCount(typ string) {
+ m.notificationCount.With(
+ prometheus.Labels{
+ notificationTypeLabel: typ,
+ },
+ ).Inc()
+}
+
+func (m *morphClientMetrics) ObserveInvoke(typ string, contract string, method string, success bool, d time.Duration) {
+ m.invokeDuration.With(
+ prometheus.Labels{
+ invokeTypeLabel: typ,
+ contractLabel: contract,
+ methodLabel: method,
+ successLabel: strconv.FormatBool(success),
+ },
+ ).Observe(d.Seconds())
+}
diff --git a/internal/metrics/morphcache.go b/internal/metrics/morphcache.go
new file mode 100644
index 0000000000..388cb11e8b
--- /dev/null
+++ b/internal/metrics/morphcache.go
@@ -0,0 +1,39 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
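+// MorphCacheMetrics defines metrics collected by the morph cache.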
+type MorphCacheMetrics interface {
+ AddMethodDuration(method string, success bool, d time.Duration)
+}
+
+type morphCacheMetrics struct {
+ methodDuration *prometheus.HistogramVec
+}
+
+var _ MorphCacheMetrics = (*morphCacheMetrics)(nil)
+
+func newMorphCacheMetrics(ns string) *morphCacheMetrics {
+ return &morphCacheMetrics{
+ methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: ns,
+ Subsystem: morphCacheSubsystem,
+ Name: "request_duration_seconds",
+ Help: "Morph cache request process duration",
+ }, []string{successLabel, methodLabel}),
+ }
+}
+
+func (m *morphCacheMetrics) AddMethodDuration(method string, success bool, d time.Duration) {
+ m.methodDuration.With(
+ prometheus.Labels{
+ successLabel: strconv.FormatBool(success),
+ methodLabel: method,
+ },
+ ).Observe(d.Seconds())
+}
diff --git a/internal/metrics/multinet.go b/internal/metrics/multinet.go
new file mode 100644
index 0000000000..6b1f99d46a
--- /dev/null
+++ b/internal/metrics/multinet.go
@@ -0,0 +1,35 @@
+package metrics
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type multinetMetrics struct {
+ dials *prometheus.GaugeVec
+}
+
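+// MultinetMetrics records the outcomes of dial attempts performed by multinet.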
+type MultinetMetrics interface {
+ Dial(sourceIP string, success bool)
+}
+
+func newMultinetMetrics(ns string) *multinetMetrics {
+ return &multinetMetrics{
+ dials: metrics.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: ns,
+ Subsystem: multinetSubsystem,
+ Name: "dial_count",
+ Help: "Dials count performed by multinet",
+ }, []string{sourceIPLabel, successLabel}),
+ }
+}
+
+func (m *multinetMetrics) Dial(sourceIP string, success bool) {
+ m.dials.With(prometheus.Labels{
+ sourceIPLabel: sourceIP,
+ successLabel: strconv.FormatBool(success),
+ }).Inc()
+}
diff --git a/internal/metrics/node.go b/internal/metrics/node.go
new file mode 100644
index 0000000000..8ade19eb27
--- /dev/null
+++ b/internal/metrics/node.go
@@ -0,0 +1,134 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/misc"
+ morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
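+// NodeMetrics aggregates all metrics collected by a storage node.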
+type NodeMetrics struct {
+ engine *engineMetrics
+ state *stateMetrics
+ replicator *replicatorMetrics
+ objectService *objectServiceMetrics
+ treeService *treeServiceMetrics
+ epoch prometheus.Gauge
+ fstree *fstreeMetrics
+ blobstore *blobstoreMetrics
+ blobobvnizca *blobovnicza
+ metabase *metabaseMetrics
+ pilorama *piloramaMetrics
+ grpc *grpcServerMetrics
+ policer *policerMetrics
+ morphClient *morphClientMetrics
+ morphCache *morphCacheMetrics
+ log logger.LogMetrics
+ multinet *multinetMetrics
+ qos *QoSMetrics
+ // nolint: unused
+ appInfo *ApplicationInfo
+}
+
+func NewNodeMetrics() *NodeMetrics {
+ return &NodeMetrics{
+ objectService: newObjectServiceMetrics(),
+ engine: newEngineMetrics(),
+ state: newStateMetrics(),
+ replicator: newReplicatorMetrics(),
+ treeService: newTreeServiceMetrics(),
+ epoch: metrics.NewGauge(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: innerRingSubsystem,
+ Name: "epoch",
+ Help: "Current epoch as seen by inner-ring node.",
+ }),
+ fstree: newFSTreeMetrics(),
+ blobstore: newBlobstoreMetrics(),
+ blobobvnizca: newBlobovnicza(),
+ metabase: newMetabaseMetrics(),
+ pilorama: newPiloramaMetrics(),
+ grpc: newGrpcServerMetrics(),
+ policer: newPolicerMetrics(),
+ morphClient: newMorphClientMetrics(),
+ morphCache: newMorphCacheMetrics(namespace),
+ log: logger.NewLogMetrics(namespace),
+ appInfo: NewApplicationInfo(misc.Version),
+ multinet: newMultinetMetrics(namespace),
+ qos: newQoSMetrics(),
+ }
+}
+
+// SetEpoch updates the epoch metric.
+func (m *NodeMetrics) SetEpoch(epoch uint64) {
+ m.epoch.Set(float64(epoch))
+}
+
+func (m *NodeMetrics) TreeService() TreeMetricsRegister {
+ return m.treeService
+}
+
+func (m *NodeMetrics) Replicator() ReplicatorMetrics {
+ return m.replicator
+}
+
+func (m *NodeMetrics) ObjectService() ObjectServiceMetrics {
+ return m.objectService
+}
+
+func (m *NodeMetrics) Engine() EngineMetrics {
+ return m.engine
+}
+
+func (m *NodeMetrics) State() StateMetrics {
+ return m.state
+}
+
+func (m *NodeMetrics) FSTree() FSTreeMetrics {
+ return m.fstree
+}
+
+func (m *NodeMetrics) Blobstore() BlobstoreMetrics {
+ return m.blobstore
+}
+
+func (m *NodeMetrics) BlobobvnizcaTreeMetrics() BlobobvnizcaMetrics {
+ return m.blobobvnizca
+}
+
+func (m *NodeMetrics) MetabaseMetrics() MetabaseMetrics {
+ return m.metabase
+}
+
+func (m *NodeMetrics) PiloramaMetrics() PiloramaMetrics {
+ return m.pilorama
+}
+
+func (m *NodeMetrics) GrpcServerMetrics() GrpcServerMetrics {
+ return m.grpc
+}
+
+func (m *NodeMetrics) PolicerMetrics() PolicerMetrics {
+ return m.policer
+}
+
+func (m *NodeMetrics) MorphClientMetrics() morphmetrics.Register {
+ return m.morphClient
+}
+
+func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics {
+ return m.morphCache
+}
+
+func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
+ return m.log
+}
+
+func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
+ return m.multinet
+}
+
+func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
+ return m.qos
+}
diff --git a/internal/metrics/object.go b/internal/metrics/object.go
new file mode 100644
index 0000000000..e4f6dfde16
--- /dev/null
+++ b/internal/metrics/object.go
@@ -0,0 +1,60 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
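+// ObjectServiceMetrics defines metrics collected by the Object service.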
+type ObjectServiceMetrics interface {
+ AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
+ AddPayloadSize(method string, size int)
+}
+
+type objectServiceMetrics struct {
+ methodDuration *prometheus.HistogramVec
+ payloadCounter *prometheus.CounterVec
+ ioTagOpsCounter *prometheus.CounterVec
+}
+
+func newObjectServiceMetrics() *objectServiceMetrics {
+ return &objectServiceMetrics{
+ methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: objectSubsystem,
+ Name: "request_duration_seconds",
+ Help: "Object Service request process duration",
+ }, []string{methodLabel, successLabel}),
+ payloadCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: objectSubsystem,
+ Name: "request_payload_bytes",
+ Help: "Object Service request payload",
+ }, []string{methodLabel}),
+ ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: objectSubsystem,
+ Name: "requests_total",
+ Help: "Count of requests for each IO tag",
+ }, []string{methodLabel, ioTagLabel}),
+ }
+}
+
+func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
+ m.methodDuration.With(prometheus.Labels{
+ methodLabel: method,
+ successLabel: strconv.FormatBool(success),
+ }).Observe(d.Seconds())
+ m.ioTagOpsCounter.With(prometheus.Labels{
+ ioTagLabel: ioTag,
+ methodLabel: method,
+ }).Inc()
+}
+
+func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {
+ m.payloadCounter.With(prometheus.Labels{
+ methodLabel: method,
+ }).Add(float64(size))
+}
diff --git a/internal/metrics/pilorama.go b/internal/metrics/pilorama.go
new file mode 100644
index 0000000000..c669275fe0
--- /dev/null
+++ b/internal/metrics/pilorama.go
@@ -0,0 +1,53 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
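+// PiloramaMetrics defines metrics collected by the pilorama tree storage.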
+type PiloramaMetrics interface {
+ SetMode(shardID string, m mode.ComponentMode)
+ Close(shardID string)
+
+ AddMethodDuration(shardID string, method string, d time.Duration, success bool)
+}
+
+func newPiloramaMetrics() *piloramaMetrics {
+ return &piloramaMetrics{
+ mode: newShardIDMode(piloramaSubSystem, "mode", "Pilorama mode"),
+ reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: piloramaSubSystem,
+ Name: "request_duration_seconds",
+ Help: "Accumulated Pilorama request process duration",
+ }, []string{shardIDLabel, successLabel, methodLabel}),
+ }
+}
+
+type piloramaMetrics struct {
+ mode *shardIDModeValue
+ reqDuration *prometheus.HistogramVec
+}
+
+func (m *piloramaMetrics) SetMode(shardID string, mode mode.ComponentMode) {
+ m.mode.SetMode(shardID, mode.String())
+}
+
+func (m *piloramaMetrics) AddMethodDuration(shardID string, method string, d time.Duration, success bool) {
+ m.reqDuration.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ successLabel: strconv.FormatBool(success),
+ methodLabel: method,
+ }).Observe(d.Seconds())
+}
+
+func (m *piloramaMetrics) Close(shardID string) {
+ m.mode.SetMode(shardID, closedMode)
+ m.reqDuration.DeletePartialMatch(prometheus.Labels{
+ shardIDLabel: shardID,
+ })
+}
diff --git a/internal/metrics/policer.go b/internal/metrics/policer.go
new file mode 100644
index 0000000000..e4bdc944e4
--- /dev/null
+++ b/internal/metrics/policer.go
@@ -0,0 +1,29 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
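+// PolicerMetrics defines metrics collected by the policer.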
+type PolicerMetrics interface {
+ IncProcessedObjects()
+}
+
+type policerMetrics struct {
+ processedObjectsCounter prometheus.Counter
+}
+
+func newPolicerMetrics() *policerMetrics {
+ return &policerMetrics{
+ processedObjectsCounter: metrics.NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: policerSubsystem,
+ Name: "processed_objects_total",
+ Help: "Total number of objects processed by policer",
+ }),
+ }
+}
+
+func (m *policerMetrics) IncProcessedObjects() {
+ m.processedObjectsCounter.Inc()
+}
diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go
new file mode 100644
index 0000000000..be68781422
--- /dev/null
+++ b/internal/metrics/qos.go
@@ -0,0 +1,52 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
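+// QoSMetrics tracks per-shard counts of pending, in-progress, completed and
+// resource-exhausted operations, broken down by operation and IO tag.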
+type QoSMetrics struct {
+ opsCounter *prometheus.GaugeVec
+}
+
+func newQoSMetrics() *QoSMetrics {
+ return &QoSMetrics{
+ opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: qosSubsystem,
+ Name: "operations_total",
+ Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
+ }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
+ }
+}
+
+func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "pending",
+ }).Set(float64(pending))
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "in_progress",
+ }).Set(float64(inProgress))
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "completed",
+ }).Set(float64(completed))
+ m.opsCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ operationLabel: operation,
+ ioTagLabel: tag,
+ typeLabel: "resource_exhausted",
+ }).Set(float64(resourceExhausted))
+}
+
+func (m *QoSMetrics) Close(shardID string) {
+ m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+}
diff --git a/internal/metrics/replicator.go b/internal/metrics/replicator.go
new file mode 100644
index 0000000000..ca72a30318
--- /dev/null
+++ b/internal/metrics/replicator.go
@@ -0,0 +1,61 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
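+// ReplicatorMetrics defines metrics collected by the replicator.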
+type ReplicatorMetrics interface {
+ IncInFlightRequest()
+ DecInFlightRequest()
+ IncProcessedObjects()
+ AddPayloadSize(size int64)
+}
+
+type replicatorMetrics struct {
+ inFlightRequests prometheus.Gauge
+ processedObjects prometheus.Counter
+ totalReplicatedPayloadSize prometheus.Counter
+}
+
+func (m *replicatorMetrics) IncInFlightRequest() {
+ m.inFlightRequests.Inc()
+}
+
+func (m *replicatorMetrics) DecInFlightRequest() {
+ m.inFlightRequests.Dec()
+}
+
+func (m *replicatorMetrics) IncProcessedObjects() {
+ m.processedObjects.Inc()
+}
+
+func (m *replicatorMetrics) AddPayloadSize(size int64) {
+ m.totalReplicatedPayloadSize.Add(float64(size))
+}
+
+func newReplicatorMetrics() *replicatorMetrics {
+ return &replicatorMetrics{
+ inFlightRequests: newReplicatorGauge("in_flight_requests_total", "Number of in-flight requests"),
+ processedObjects: newReplicatorCounter("processed_objects_total", "Number of objects processed since the node startup"),
+ totalReplicatedPayloadSize: newReplicatorCounter("total_replicated_payload_size_bytes", "Total size of payloads replicated"),
+ }
+}
+
+func newReplicatorCounter(name, help string) prometheus.Counter {
+ return metrics.NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: replicatorSubsystem,
+ Name: name,
+ Help: help,
+ })
+}
+
+func newReplicatorGauge(name, help string) prometheus.Gauge {
+ return metrics.NewGauge(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: replicatorSubsystem,
+ Name: name,
+ Help: help,
+ })
+}
diff --git a/internal/metrics/state.go b/internal/metrics/state.go
new file mode 100644
index 0000000000..243f648e52
--- /dev/null
+++ b/internal/metrics/state.go
@@ -0,0 +1,29 @@
+package metrics
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
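+// StateMetrics reports the current health state of the node.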
+type StateMetrics interface {
+ SetHealth(s int32)
+}
+
+type stateMetrics struct {
+ healthCheck prometheus.Gauge
+}
+
+func newStateMetrics() *stateMetrics {
+ return &stateMetrics{
+ healthCheck: metrics.NewGauge(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: stateSubsystem,
+ Name: "health",
+ Help: "Current Node state",
+ }),
+ }
+}
+
+func (m *stateMetrics) SetHealth(s int32) {
+ m.healthCheck.Set(float64(s))
+}
diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go
new file mode 100644
index 0000000000..e192c4398e
--- /dev/null
+++ b/internal/metrics/treeservice.go
@@ -0,0 +1,79 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
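+// TreeMetricsRegister defines metrics collected by the tree service.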
+type TreeMetricsRegister interface {
+ AddReplicateTaskDuration(time.Duration, bool)
+ AddReplicateWaitDuration(time.Duration, bool)
+ AddSyncDuration(time.Duration, bool)
+ AddOperation(string, string)
+}
+
+type treeServiceMetrics struct {
+ replicateTaskDuration *prometheus.HistogramVec
+ replicateWaitDuration *prometheus.HistogramVec
+ syncOpDuration *prometheus.HistogramVec
+ ioTagOpsCounter *prometheus.CounterVec
+}
+
+var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
+
+func newTreeServiceMetrics() *treeServiceMetrics {
+ return &treeServiceMetrics{
+ replicateTaskDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: treeServiceSubsystem,
+ Name: "replicate_task_duration_seconds",
+ Help: "Duration of individual replication tasks executed as part of replication loops",
+ }, []string{successLabel}),
+ replicateWaitDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: treeServiceSubsystem,
+ Name: "replicate_wait_duration_seconds",
+ Help: "Duration of overall waiting time for replication loops",
+ }, []string{successLabel}),
+ syncOpDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: treeServiceSubsystem,
+ Name: "sync_duration_seconds",
+ Help: "Duration of synchronization operations",
+ }, []string{successLabel}),
+ ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: treeServiceSubsystem,
+ Name: "requests_total",
+ Help: "Count of requests for each IO tag",
+ }, []string{methodLabel, ioTagLabel}),
+ }
+}
+
+func (m *treeServiceMetrics) AddReplicateTaskDuration(d time.Duration, success bool) {
+ m.replicateTaskDuration.With(prometheus.Labels{
+ successLabel: strconv.FormatBool(success),
+ }).Observe(d.Seconds())
+}
+
+func (m *treeServiceMetrics) AddReplicateWaitDuration(d time.Duration, success bool) {
+ m.replicateWaitDuration.With(prometheus.Labels{
+ successLabel: strconv.FormatBool(success),
+ }).Observe(d.Seconds())
+}
+
+func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
+ m.syncOpDuration.With(prometheus.Labels{
+ successLabel: strconv.FormatBool(success),
+ }).Observe(d.Seconds())
+}
+
+func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
+ m.ioTagOpsCounter.With(prometheus.Labels{
+ ioTagLabel: ioTag,
+ methodLabel: op,
+ }).Inc()
+}
diff --git a/internal/metrics/types.go b/internal/metrics/types.go
new file mode 100644
index 0000000000..6a76248bfd
--- /dev/null
+++ b/internal/metrics/types.go
@@ -0,0 +1,17 @@
+package metrics
+
+import (
+ "strconv"
+)
+
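+// NullBool is a bool that may be unset, mirroring the database/sql NullBool pattern.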
+type NullBool struct {
+ Bool bool
+ Valid bool // Valid is true if Bool is not NULL
+}
+
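+// String renders the value for use as a metric label: "true" or "false" when
+// Valid, and the empty string otherwise.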
+func (v NullBool) String() string {
+ if !v.Valid {
+ return ""
+ }
+ return strconv.FormatBool(v.Bool)
+}
diff --git a/internal/metrics/writecache.go b/internal/metrics/writecache.go
new file mode 100644
index 0000000000..1b708f7103
--- /dev/null
+++ b/internal/metrics/writecache.go
@@ -0,0 +1,108 @@
+package metrics
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
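+// WriteCacheMetrics defines metrics collected by the writecache.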
+type WriteCacheMetrics interface {
+ AddMethodDuration(shardID, path, storageType, method string, success bool, d time.Duration)
+ SetActualCount(shardID, path, storageType string, count uint64)
+ SetEstimateSize(shardID, path, storageType string, size uint64)
+ SetMode(shardID, mode string)
+ IncOperationCounter(shardID, path, storageType, operation string, success NullBool)
+ Close(shardID, path string)
+}
+
+type writeCacheMetrics struct {
+ methodDuration *prometheus.HistogramVec
+ operationCounter *prometheus.CounterVec
+
+ actualCount *prometheus.GaugeVec
+
+ estimatedSize *prometheus.GaugeVec
+
+ mode *shardIDModeValue
+}
+
+func newWriteCacheMetrics() *writeCacheMetrics {
+ return &writeCacheMetrics{
+ methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: writeCacheSubsystem,
+ Name: "request_duration_seconds",
+ Help: "Writecache request process duration",
+ }, []string{shardIDLabel, successLabel, storageLabel, methodLabel, pathLabel}),
+ operationCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: writeCacheSubsystem,
+ Name: "operations_total",
+ Help: "The number of writecache operations processed",
+ }, []string{shardIDLabel, storageLabel, successLabel, operationLabel, pathLabel}),
+ actualCount: newWCGaugeVec("actual_objects_total", "Actual objects count in writecache", []string{shardIDLabel, storageLabel, pathLabel}),
+ estimatedSize: newWCGaugeVec("estimated_size_bytes", "Estimated writecache size", []string{shardIDLabel, storageLabel, pathLabel}),
+ mode: newShardIDMode(writeCacheSubsystem, "mode_info", "Writecache mode value"),
+ }
+}
+
+func (m *writeCacheMetrics) AddMethodDuration(shardID, path, storageType, method string, success bool, d time.Duration) {
+ m.methodDuration.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ successLabel: strconv.FormatBool(success),
+ storageLabel: storageType,
+ methodLabel: method,
+ pathLabel: path,
+ },
+ ).Observe(d.Seconds())
+}
+
+func (m *writeCacheMetrics) SetActualCount(shardID, path, storageType string, count uint64) {
+ m.actualCount.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ storageLabel: storageType,
+ pathLabel: path,
+ }).Set(float64(count))
+}
+
+func (m *writeCacheMetrics) SetEstimateSize(shardID, path, storageType string, size uint64) {
+ m.estimatedSize.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ storageLabel: storageType,
+ pathLabel: path,
+ }).Set(float64(size))
+}
+
+func (m *writeCacheMetrics) SetMode(shardID string, mode string) {
+ m.mode.SetMode(shardID, mode)
+}
+
+func (m *writeCacheMetrics) IncOperationCounter(shardID, path, storageType, operation string, success NullBool) {
+ m.operationCounter.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ storageLabel: storageType,
+ operationLabel: operation,
+ successLabel: success.String(),
+ pathLabel: path,
+ }).Inc()
+}
+
+func (m *writeCacheMetrics) Close(shardID, path string) {
+ m.mode.Delete(shardID)
+ m.methodDuration.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
+ m.operationCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
+ m.actualCount.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
+ m.estimatedSize.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
+}
+
+func newWCGaugeVec(name, help string, labels []string) *prometheus.GaugeVec {
+ return metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: writeCacheSubsystem,
+ Name: name,
+ Help: help,
+ }, labels)
+}
diff --git a/internal/net/config.go b/internal/net/config.go
new file mode 100644
index 0000000000..b84ac3b350
--- /dev/null
+++ b/internal/net/config.go
@@ -0,0 +1,69 @@
+package net
+
+import (
+ "errors"
+ "fmt"
+ "net/netip"
+ "slices"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var errEmptySourceIPList = errors.New("empty source IP list")
+
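+// Subnet binds an IP prefix to the source IPs used to dial targets within it.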
+type Subnet struct {
+ Prefix string
+ SourceIPs []string
+}
+
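+// Config is the multinet configuration used to build dialers.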
+type Config struct {
+ Enabled bool
+ Subnets []Subnet
+ Balancer string
+ Restrict bool
+ FallbackDelay time.Duration
+ Metrics metrics.MultinetMetrics
+}
+
+func (c Config) toMultinetConfig() (multinet.Config, error) {
+ var subnets []multinet.Subnet
+ for _, s := range c.Subnets {
+ var ms multinet.Subnet
+ p, err := netip.ParsePrefix(s.Prefix)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
+ }
+ ms.Prefix = p
+ for _, ip := range s.SourceIPs {
+ addr, err := netip.ParseAddr(ip)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
+ }
+ ms.SourceIPs = append(ms.SourceIPs, addr)
+ }
+ if len(ms.SourceIPs) == 0 {
+ return multinet.Config{}, errEmptySourceIPList
+ }
+ subnets = append(subnets, ms)
+ }
+ return multinet.Config{
+ Subnets: subnets,
+ Balancer: multinet.BalancerType(c.Balancer),
+ Restrict: c.Restrict,
+ FallbackDelay: c.FallbackDelay,
+ Dialer: newDefaultDialer(),
+ EventHandler: newEventHandler(c.Metrics),
+ }, nil
+}
+
+func (c Config) equals(other Config) bool {
+ return c.Enabled == other.Enabled &&
+ slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
+ return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
+ }) &&
+ c.Balancer == other.Balancer &&
+ c.Restrict == other.Restrict &&
+ c.FallbackDelay == other.FallbackDelay
+}
diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go
new file mode 100644
index 0000000000..6265f18606
--- /dev/null
+++ b/internal/net/dial_target.go
@@ -0,0 +1,54 @@
+// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
+
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package net
+
+import (
+ "net/url"
+ "strings"
+)
+
+// parseDialTarget returns the network and address to pass to dialer.
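+// Illustrative results: "host:1234" yields ("tcp", "host:1234"), while
+// "unix:/tmp/s.sock" and "unix:///tmp/s.sock" both yield ("unix", "/tmp/s.sock").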
+func parseDialTarget(target string) (string, string) {
+ net := "tcp"
+ m1 := strings.Index(target, ":")
+ m2 := strings.Index(target, ":/")
+ // handle unix:addr which will fail with url.Parse
+ if m1 >= 0 && m2 < 0 {
+ if n := target[0:m1]; n == "unix" {
+ return n, target[m1+1:]
+ }
+ }
+ if m2 >= 0 {
+ t, err := url.Parse(target)
+ if err != nil {
+ return net, target
+ }
+ scheme := t.Scheme
+ addr := t.Path
+ if scheme == "unix" {
+ if addr == "" {
+ addr = t.Host
+ }
+ return scheme, addr
+ }
+ }
+ return net, target
+}
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
new file mode 100644
index 0000000000..daf0f815fa
--- /dev/null
+++ b/internal/net/dialer.go
@@ -0,0 +1,39 @@
+package net
+
+import (
+ "context"
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
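+// Dialer abstracts context-aware connection dialing.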
+type Dialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
+ return d.DialContext(ctx, "tcp", address)
+}
+
+func newDefaultDialer() net.Dialer {
+ // From `grpc.WithContextDialer` comment:
+ //
+ // Note: All supported releases of Go (as of December 2023) override the OS
+ // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+ // with OS defaults for keepalive time and interval, use a net.Dialer that sets
+ // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+ // option to true from the Control field. For a concrete example of how to do
+ // this, see internal.NetDialerWithTCPKeepalive().
+ //
+ // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
+ return net.Dialer{
+ KeepAlive: time.Duration(-1),
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go
new file mode 100644
index 0000000000..3d94dedc74
--- /dev/null
+++ b/internal/net/dialer_source.go
@@ -0,0 +1,83 @@
+package net
+
+import (
+ "context"
+ "net"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
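+// DialerSource builds dialers from a Config and supports reconfiguration via Update.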
+type DialerSource struct {
+ guard sync.RWMutex
+
+ c Config
+
+ md multinet.Dialer
+}
+
+func NewDialerSource(c Config) (*DialerSource, error) {
+ result := &DialerSource{}
+ if err := result.build(c); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func (s *DialerSource) build(c Config) error {
+ if c.Enabled {
+ mc, err := c.toMultinetConfig()
+ if err != nil {
+ return err
+ }
+ md, err := multinet.NewDialer(mc)
+ if err != nil {
+ return err
+ }
+ s.md = md
+ s.c = c
+ return nil
+ }
+ s.md = nil
+ s.c = c
+ return nil
+}
+
+// GrpcContextDialer returns a dial function to pass to grpc.WithContextDialer.
+// Returns nil if multinet is disabled.
+func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+
+ if s.c.Enabled {
+ return func(ctx context.Context, address string) (net.Conn, error) {
+ network, address := parseDialTarget(address)
+ return s.md.DialContext(ctx, network, address)
+ }
+ }
+ return nil
+}
+
+// NetContextDialer returns a dial function with the net.Dialer.DialContext signature.
+// Returns nil if multinet is disabled.
+func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+
+ if s.c.Enabled {
+ return func(ctx context.Context, network, address string) (net.Conn, error) {
+ return s.md.DialContext(ctx, network, address)
+ }
+ }
+ return nil
+}
+
+func (s *DialerSource) Update(c Config) error {
+ s.guard.Lock()
+ defer s.guard.Unlock()
+
+ if s.c.equals(c) {
+ return nil
+ }
+ return s.build(c)
+}
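
A minimal sketch of plugging a DialerSource into a gRPC client; the consumer package name and the credentials choice are assumptions. Since GrpcContextDialer returns nil when multinet is disabled, the option is appended conditionally:

package rpcclient // hypothetical consumer package

import (
	internalnet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialOptions(ds *internalnet.DialerSource) []grpc.DialOption {
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// A nil dialer means multinet is disabled and gRPC falls back to its default dialer.
	if dialer := ds.GrpcContextDialer(); dialer != nil {
		opts = append(opts, grpc.WithContextDialer(dialer))
	}
	return opts
}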
diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go
new file mode 100644
index 0000000000..024e5cf7ca
--- /dev/null
+++ b/internal/net/event_handler.go
@@ -0,0 +1,29 @@
+package net
+
+import (
+ "net"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var _ multinet.EventHandler = (*metricsEventHandler)(nil)
+
+type metricsEventHandler struct {
+ m metrics.MultinetMetrics
+}
+
+func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) {
+ sourceIPString := "undefined"
+ if sourceIP != nil {
+ sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
+ }
+ m.m.Dial(sourceIPString, err == nil)
+}
+
+func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler {
+ if m == nil {
+ return nil
+ }
+ return &metricsEventHandler{m: m}
+}
diff --git a/internal/qos/config.go b/internal/qos/config.go
new file mode 100644
index 0000000000..d90b403b56
--- /dev/null
+++ b/internal/qos/config.go
@@ -0,0 +1,31 @@
+package qos
+
+import (
+ "math"
+ "time"
+)
+
+const (
+ NoLimit int64 = math.MaxInt64
+ DefaultIdleTimeout = 5 * time.Minute
+)
+
+type LimiterConfig struct {
+ Read OpConfig
+ Write OpConfig
+}
+
+type OpConfig struct {
+ MaxWaitingOps int64
+ MaxRunningOps int64
+ IdleTimeout time.Duration
+ Tags []IOTagConfig
+}
+
+type IOTagConfig struct {
+ Tag string
+ Weight *float64
+ LimitOps *float64
+ ReservedOps *float64
+ Prohibited bool
+}
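
A hypothetical configuration sketch with illustrative values: reads are capped at 100 concurrent operations with up to 1000 queued, while writes are left unlimited (which, with no tags configured, selects the semaphore scheduler in limiter.go below):

package qos

func exampleLimiterConfig() LimiterConfig {
	return LimiterConfig{
		Read: OpConfig{
			MaxRunningOps: 100,
			MaxWaitingOps: 1000,
			IdleTimeout:   DefaultIdleTimeout,
		},
		Write: OpConfig{
			MaxRunningOps: NoLimit,
			MaxWaitingOps: NoLimit,
			IdleTimeout:   DefaultIdleTimeout,
		},
	}
}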
diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go
new file mode 100644
index 0000000000..58cd9e52cd
--- /dev/null
+++ b/internal/qos/grpc.go
@@ -0,0 +1,86 @@
+package qos
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "google.golang.org/grpc"
+)
+
+func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
+ ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String())
+ return handler(ctx, req)
+ }
+}
+
+func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor {
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ rawTag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+ tag, err := FromRawString(rawTag)
+ if err != nil {
+ tag = IOTagClient
+ }
+ if tag.IsLocal() {
+ tag = IOTagInternal
+ }
+ ctx = tagging.ContextWithIOTag(ctx, tag.String())
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+}
+
+func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ rawTag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return streamer(ctx, desc, cc, method, opts...)
+ }
+ tag, err := FromRawString(rawTag)
+ if err != nil {
+ tag = IOTagClient
+ }
+ if tag.IsLocal() {
+ tag = IOTagInternal
+ }
+ ctx = tagging.ContextWithIOTag(ctx, tag.String())
+ return streamer(ctx, desc, cc, method, opts...)
+ }
+}
+
+func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
+ if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() {
+ return handler(ctx, req)
+ }
+
+ release, ok := getLimiter().Acquire(info.FullMethod)
+ if !ok {
+ return nil, new(apistatus.ResourceExhausted)
+ }
+ defer release()
+
+ return handler(ctx, req)
+ }
+}
+
+//nolint:contextcheck (grpc.ServerStream manages the context itself)
+func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor {
+ return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() {
+ return handler(srv, ss)
+ }
+
+ release, ok := getLimiter().Acquire(info.FullMethod)
+ if !ok {
+ return new(apistatus.ResourceExhausted)
+ }
+ defer release()
+
+ return handler(srv, ss)
+ }
+}
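
A minimal wiring sketch; the helper names are assumptions, not taken from the patch. The limiter interceptors guard the server side, while the tag-adjusting interceptors decorate outgoing client calls:

package qos

import (
	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
	"google.golang.org/grpc"
)

func newLimitedServer(getLimiter func() limiting.Limiter) *grpc.Server {
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter)),
		grpc.ChainStreamInterceptor(NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter)),
	)
}

func clientDialOptions() []grpc.DialOption {
	return []grpc.DialOption{
		grpc.WithChainUnaryInterceptor(NewAdjustOutgoingIOTagUnaryClientInterceptor()),
		grpc.WithChainStreamInterceptor(NewAdjustOutgoingIOTagStreamClientInterceptor()),
	}
}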
diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go
new file mode 100644
index 0000000000..7d0826754d
--- /dev/null
+++ b/internal/qos/grpc_test.go
@@ -0,0 +1,219 @@
+package qos_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+)
+
+const (
+ okKey = "ok"
+)
+
+var (
+ errTest = errors.New("mock")
+ errWrongTag = errors.New("wrong tag")
+ errNoTag = errors.New("failed to get tag from context")
+ errResExhausted *apistatus.ResourceExhausted
+ tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
+)
+
+type mockGRPCServerStream struct {
+ grpc.ServerStream
+
+ ctx context.Context
+}
+
+func (m *mockGRPCServerStream) Context() context.Context {
+ return m.ctx
+}
+
+type limiter struct {
+ acquired bool
+ released bool
+}
+
+func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
+ l.acquired = true
+ if key != okKey {
+ return nil, false
+ }
+ return func() { l.released = true }, true
+}
+
+func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
+ interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
+ handler := func(ctx context.Context, req any) (any, error) {
+ return nil, errTest
+ }
+ _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
+ return err
+}
+
+func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
+ interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
+ handler := func(srv any, stream grpc.ServerStream) error {
+ return errTest
+ }
+ err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
+ FullMethod: methodName,
+ }, handler)
+ return err
+}
+
+func Test_MaxActiveRPCLimiter(t *testing.T) {
+ // UnaryServerInterceptor
+ t.Run("unary fail", func(t *testing.T) {
+ var lim limiter
+
+ err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
+ require.ErrorAs(t, err, &errResExhausted)
+ require.True(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("unary pass critical", func(t *testing.T) {
+ var lim limiter
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+
+ err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
+ require.ErrorIs(t, err, errTest)
+ require.False(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("unary pass", func(t *testing.T) {
+ var lim limiter
+
+ err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
+ require.ErrorIs(t, err, errTest)
+ require.True(t, lim.acquired)
+ require.True(t, lim.released)
+ })
+ // StreamServerInterceptor
+ t.Run("stream fail", func(t *testing.T) {
+ var lim limiter
+
+ err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
+ require.ErrorAs(t, err, &errResExhausted)
+ require.True(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("stream pass critical", func(t *testing.T) {
+ var lim limiter
+ ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
+
+ err := streamMaxActiveRPCLimiter(ctx, &lim, "")
+ require.ErrorIs(t, err, errTest)
+ require.False(t, lim.acquired)
+ require.False(t, lim.released)
+ })
+ t.Run("stream pass", func(t *testing.T) {
+ var lim limiter
+
+ err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
+ require.ErrorIs(t, err, errTest)
+ require.True(t, lim.acquired)
+ require.True(t, lim.released)
+ })
+}
+
+func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
+ interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
+ called := false
+ handler := func(ctx context.Context, req any) (any, error) {
+ called = true
+ if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
+ return nil, nil
+ }
+ return nil, errWrongTag
+ }
+ _, err := interceptor(context.Background(), nil, nil, handler)
+ require.NoError(t, err)
+ require.True(t, called)
+}
+
+func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
+ interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()
+
+ // check context with no value
+ called := false
+ invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
+ called = true
+ if _, ok := tagging.IOTagFromContext(ctx); ok {
+ return fmt.Errorf("%v: expected no IO tags", errWrongTag)
+ }
+ return nil
+ }
+ require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
+ require.True(t, called)
+
+ // check context for internal tag
+ targetTag := qos.IOTagInternal.String()
+ invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
+ raw, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return errNoTag
+ }
+ if raw != targetTag {
+ return errWrongTag
+ }
+ return nil
+ }
+ for _, tag := range tags {
+ ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
+ require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
+ }
+
+ // check context for client tag
+ ctx := tagging.ContextWithIOTag(context.Background(), "")
+ targetTag = qos.IOTagClient.String()
+ require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
+}
+
+func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
+ interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()
+
+ // check context with no value
+ called := false
+ streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ called = true
+ if _, ok := tagging.IOTagFromContext(ctx); ok {
+ return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
+ }
+ return nil, nil
+ }
+ _, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
+ require.True(t, called)
+ require.NoError(t, err)
+
+ // check context for internal tag
+ targetTag := qos.IOTagInternal.String()
+ streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ raw, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ return nil, errNoTag
+ }
+ if raw != targetTag {
+ return nil, errWrongTag
+ }
+ return nil, nil
+ }
+ for _, tag := range tags {
+ ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
+ _, err := interceptor(ctx, nil, nil, "", streamer, nil)
+ require.NoError(t, err)
+ }
+
+ // check context for client tag
+ ctx := tagging.ContextWithIOTag(context.Background(), "")
+ targetTag = qos.IOTagClient.String()
+ _, err = interceptor(ctx, nil, nil, "", streamer, nil)
+ require.NoError(t, err)
+}
diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go
new file mode 100644
index 0000000000..2d7de32fc9
--- /dev/null
+++ b/internal/qos/limiter.go
@@ -0,0 +1,246 @@
+package qos
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+const (
+ defaultIdleTimeout time.Duration = 0
+ defaultShare float64 = 1.0
+ minusOne = ^uint64(0)
+
+ defaultMetricsCollectTimeout = 5 * time.Second
+)
+
+type ReleaseFunc scheduling.ReleaseFunc
+
+type Limiter interface {
+ ReadRequest(context.Context) (ReleaseFunc, error)
+ WriteRequest(context.Context) (ReleaseFunc, error)
+ SetParentID(string)
+ SetMetrics(Metrics)
+ Close()
+}
+
+type scheduler interface {
+ RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
+ Close()
+}
+
+func NewLimiter(c LimiterConfig) (Limiter, error) {
+ if err := c.Validate(); err != nil {
+ return nil, err
+ }
+ readScheduler, err := createScheduler(c.Read)
+ if err != nil {
+ return nil, fmt.Errorf("create read scheduler: %w", err)
+ }
+ writeScheduler, err := createScheduler(c.Write)
+ if err != nil {
+ return nil, fmt.Errorf("create write scheduler: %w", err)
+ }
+ l := &mClockLimiter{
+ readScheduler: readScheduler,
+ writeScheduler: writeScheduler,
+ closeCh: make(chan struct{}),
+ wg: &sync.WaitGroup{},
+ readStats: createStats(),
+ writeStats: createStats(),
+ }
+ l.shardID.Store(&shardID{})
+ l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
+ l.startMetricsCollect()
+ return l, nil
+}
+
+func createScheduler(config OpConfig) (scheduler, error) {
+ if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit {
+ return newSemaphoreScheduler(config.MaxRunningOps), nil
+ }
+ return scheduling.NewMClock(
+ uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
+		convertToSchedulingTags(config.Tags), config.IdleTimeout)
+}
+
+func convertToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo {
+ result := make(map[string]scheduling.TagInfo)
+ for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
+ result[tag.String()] = scheduling.TagInfo{
+ Share: defaultShare,
+ }
+ }
+ for _, l := range limits {
+ v := result[l.Tag]
+ if l.Weight != nil && *l.Weight != 0 {
+ v.Share = *l.Weight
+ }
+ if l.LimitOps != nil && *l.LimitOps != 0 {
+ v.LimitIOPS = l.LimitOps
+ }
+ if l.ReservedOps != nil && *l.ReservedOps != 0 {
+ v.ReservedIOPS = l.ReservedOps
+ }
+ v.Prohibited = l.Prohibited
+ result[l.Tag] = v
+ }
+ return result
+}
+
+var (
+ _ Limiter = (*noopLimiter)(nil)
+ releaseStub ReleaseFunc = func() {}
+ noopLimiterInstance = &noopLimiter{}
+)
+
+func NewNoopLimiter() Limiter {
+ return noopLimiterInstance
+}
+
+type noopLimiter struct{}
+
+func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
+ return releaseStub, nil
+}
+
+func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
+ return releaseStub, nil
+}
+
+func (n *noopLimiter) SetParentID(string) {}
+
+func (n *noopLimiter) Close() {}
+
+func (n *noopLimiter) SetMetrics(Metrics) {}
+
+var _ Limiter = (*mClockLimiter)(nil)
+
+type shardID struct {
+ id string
+}
+
+type mClockLimiter struct {
+ readScheduler scheduler
+ writeScheduler scheduler
+
+ readStats map[string]*stat
+ writeStats map[string]*stat
+
+ shardID atomic.Pointer[shardID]
+ metrics atomic.Pointer[metricsHolder]
+ closeCh chan struct{}
+ wg *sync.WaitGroup
+}
+
+func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
+ return requestArrival(ctx, n.readScheduler, n.readStats)
+}
+
+func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
+ return requestArrival(ctx, n.writeScheduler, n.writeStats)
+}
+
+func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ tag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ tag = IOTagClient.String()
+ }
+ stat := getStat(tag, stats)
+ stat.pending.Add(1)
+ if tag == IOTagCritical.String() {
+ stat.inProgress.Add(1)
+ return func() {
+ stat.completed.Add(1)
+ }, nil
+ }
+ rel, err := s.RequestArrival(ctx, tag)
+ stat.inProgress.Add(1)
+ if err != nil {
+ if isResourceExhaustedErr(err) {
+ stat.resourceExhausted.Add(1)
+ return nil, &apistatus.ResourceExhausted{}
+ }
+ stat.completed.Add(1)
+ return nil, err
+ }
+ return func() {
+ rel()
+ stat.completed.Add(1)
+ }, nil
+}
+
+func (n *mClockLimiter) Close() {
+ n.readScheduler.Close()
+ n.writeScheduler.Close()
+ close(n.closeCh)
+ n.wg.Wait()
+ n.metrics.Load().metrics.Close(n.shardID.Load().id)
+}
+
+func (n *mClockLimiter) SetParentID(parentID string) {
+ n.shardID.Store(&shardID{id: parentID})
+}
+
+func (n *mClockLimiter) SetMetrics(m Metrics) {
+ n.metrics.Store(&metricsHolder{metrics: m})
+}
+
+func (n *mClockLimiter) startMetricsCollect() {
+ n.wg.Add(1)
+ go func() {
+ defer n.wg.Done()
+
+ ticker := time.NewTicker(defaultMetricsCollectTimeout)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-n.closeCh:
+ return
+ case <-ticker.C:
+ shardID := n.shardID.Load().id
+ if shardID == "" {
+ continue
+ }
+ metrics := n.metrics.Load().metrics
+ exportMetrics(metrics, n.readStats, shardID, "read")
+ exportMetrics(metrics, n.writeStats, shardID, "write")
+ }
+ }
+ }()
+}
+
+func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
+ var pending uint64
+ var inProgress uint64
+ var completed uint64
+ var resExh uint64
+ for tag, s := range stats {
+ pending = s.pending.Load()
+ inProgress = s.inProgress.Load()
+ completed = s.completed.Load()
+ resExh = s.resourceExhausted.Load()
+ if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
+ continue
+ }
+ metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
+ }
+}
+
+func isResourceExhaustedErr(err error) bool {
+ return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
+ errors.Is(err, errSemaphoreLimitExceeded) ||
+ errors.Is(err, scheduling.ErrTagRequestsProhibited)
+}
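
A minimal usage sketch; limitedRead is a hypothetical wrapper showing the intended call pattern, where the returned release callback runs exactly once per finished operation:

package qos

import "context"

func limitedRead(ctx context.Context, l Limiter, read func(context.Context) error) error {
	release, err := l.ReadRequest(ctx)
	if err != nil {
		// The error is *apistatus.ResourceExhausted when the waiting queue is
		// full or the tag is prohibited; other errors are passed through.
		return err
	}
	defer release()
	return read(ctx)
}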
diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go
new file mode 100644
index 0000000000..c00da51b71
--- /dev/null
+++ b/internal/qos/metrics.go
@@ -0,0 +1,31 @@
+package qos
+
+import "sync/atomic"
+
+type Metrics interface {
+ SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
+ Close(shardID string)
+}
+
+var _ Metrics = (*noopMetrics)(nil)
+
+type noopMetrics struct{}
+
+func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
+}
+
+func (n *noopMetrics) Close(string) {}
+
+// stat holds cumulative counters of limiter statistics.
+//
+// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
+type stat struct {
+ completed atomic.Uint64
+ pending atomic.Uint64
+ resourceExhausted atomic.Uint64
+ inProgress atomic.Uint64
+}
+
+type metricsHolder struct {
+ metrics Metrics
+}
diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go
new file mode 100644
index 0000000000..74e6928f32
--- /dev/null
+++ b/internal/qos/semaphore.go
@@ -0,0 +1,39 @@
+package qos
+
+import (
+ "context"
+ "errors"
+
+ qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
+)
+
+var (
+ _ scheduler = (*semaphore)(nil)
+ errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded")
+)
+
+type semaphore struct {
+ s *qosSemaphore.Semaphore
+}
+
+func newSemaphoreScheduler(size int64) *semaphore {
+ return &semaphore{
+ s: qosSemaphore.NewSemaphore(size),
+ }
+}
+
+func (s *semaphore) Close() {}
+
+func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ if s.s.Acquire() {
+ return s.s.Release, nil
+ }
+ return nil, errSemaphoreLimitExceeded
+}
diff --git a/internal/qos/stats.go b/internal/qos/stats.go
new file mode 100644
index 0000000000..3ecfad9f92
--- /dev/null
+++ b/internal/qos/stats.go
@@ -0,0 +1,29 @@
+package qos
+
+const unknownStatsTag = "unknown"
+
+var statTags = map[string]struct{}{
+ IOTagBackground.String(): {},
+ IOTagClient.String(): {},
+ IOTagCritical.String(): {},
+ IOTagInternal.String(): {},
+ IOTagPolicer.String(): {},
+ IOTagTreeSync.String(): {},
+ IOTagWritecache.String(): {},
+ unknownStatsTag: {},
+}
+
+func createStats() map[string]*stat {
+ result := make(map[string]*stat)
+ for tag := range statTags {
+ result[tag] = &stat{}
+ }
+ return result
+}
+
+func getStat(tag string, stats map[string]*stat) *stat {
+ if v, ok := stats[tag]; ok {
+ return v
+ }
+ return stats[unknownStatsTag]
+}
diff --git a/internal/qos/tags.go b/internal/qos/tags.go
new file mode 100644
index 0000000000..e3f7cafd6f
--- /dev/null
+++ b/internal/qos/tags.go
@@ -0,0 +1,59 @@
+package qos
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+)
+
+type IOTag string
+
+const (
+ IOTagBackground IOTag = "background"
+ IOTagClient IOTag = "client"
+ IOTagCritical IOTag = "critical"
+ IOTagInternal IOTag = "internal"
+ IOTagPolicer IOTag = "policer"
+ IOTagTreeSync IOTag = "treesync"
+ IOTagWritecache IOTag = "writecache"
+
+ ioTagUnknown IOTag = ""
+)
+
+func FromRawString(s string) (IOTag, error) {
+ switch s {
+ case string(IOTagBackground):
+ return IOTagBackground, nil
+ case string(IOTagClient):
+ return IOTagClient, nil
+ case string(IOTagCritical):
+ return IOTagCritical, nil
+ case string(IOTagInternal):
+ return IOTagInternal, nil
+ case string(IOTagPolicer):
+ return IOTagPolicer, nil
+ case string(IOTagTreeSync):
+ return IOTagTreeSync, nil
+ case string(IOTagWritecache):
+ return IOTagWritecache, nil
+ default:
+ return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
+ }
+}
+
+func (t IOTag) String() string {
+ return string(t)
+}
+
+func IOTagFromContext(ctx context.Context) string {
+ tag, ok := tagging.IOTagFromContext(ctx)
+ if !ok {
+ tag = "undefined"
+ }
+ return tag
+}
+
+func (t IOTag) IsLocal() bool {
+ return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
+}
diff --git a/internal/qos/validate.go b/internal/qos/validate.go
new file mode 100644
index 0000000000..70f1f24e8e
--- /dev/null
+++ b/internal/qos/validate.go
@@ -0,0 +1,91 @@
+package qos
+
+import (
+ "errors"
+ "fmt"
+ "math"
+)
+
+var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
+
+type tagConfig struct {
+ Shares, Limit, Reserved *float64
+}
+
+func (c *LimiterConfig) Validate() error {
+ if err := validateOpConfig(c.Read); err != nil {
+ return fmt.Errorf("limits 'read' section validation error: %w", err)
+ }
+ if err := validateOpConfig(c.Write); err != nil {
+ return fmt.Errorf("limits 'write' section validation error: %w", err)
+ }
+ return nil
+}
+
+func validateOpConfig(c OpConfig) error {
+ if c.MaxRunningOps <= 0 {
+ return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
+ }
+ if c.MaxWaitingOps <= 0 {
+ return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps)
+ }
+ if c.IdleTimeout <= 0 {
+ return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String())
+ }
+ if err := validateTags(c.Tags); err != nil {
+ return fmt.Errorf("'tags' config section validation error: %w", err)
+ }
+ return nil
+}
+
+func validateTags(configTags []IOTagConfig) error {
+ tags := map[IOTag]tagConfig{
+ IOTagBackground: {},
+ IOTagClient: {},
+ IOTagInternal: {},
+ IOTagPolicer: {},
+ IOTagTreeSync: {},
+ IOTagWritecache: {},
+ }
+ for _, t := range configTags {
+ tag, err := FromRawString(t.Tag)
+ if err != nil {
+ return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
+ }
+ if _, ok := tags[tag]; !ok {
+ return fmt.Errorf("tag %s is not configurable", t.Tag)
+ }
+ tags[tag] = tagConfig{
+ Shares: t.Weight,
+ Limit: t.LimitOps,
+ Reserved: t.ReservedOps,
+ }
+ }
+ idx := 0
+ var shares float64
+ for t, v := range tags {
+ if idx == 0 {
+ idx++
+ shares = float64Value(v.Shares)
+ } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
+ return errWeightsMustBeSpecified
+ }
+		if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
+			return fmt.Errorf("invalid weight for tag %s: must be a positive value", t.String())
+		}
+		if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
+			return fmt.Errorf("invalid limit_ops for tag %s: must be a positive value", t.String())
+		}
+		if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
+			return fmt.Errorf("invalid reserved_ops for tag %s: must be a positive value", t.String())
+		}
+ }
+ return nil
+}
+
+func float64Value(f *float64) float64 {
+ if f == nil {
+ return 0.0
+ }
+ return *f
+}
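
A sketch of the all-or-none weight rule, using illustrative values: setting a weight for only one of the six configurable tags fails validation, because the remaining tags then implicitly have weight zero:

package qos

import "fmt"

func weightRuleDemo() {
	w := 2.0
	cfg := LimiterConfig{
		Read: OpConfig{
			MaxRunningOps: 10,
			MaxWaitingOps: 10,
			IdleTimeout:   DefaultIdleTimeout,
			Tags:          []IOTagConfig{{Tag: IOTagClient.String(), Weight: &w}},
		},
		Write: OpConfig{MaxRunningOps: 10, MaxWaitingOps: 10, IdleTimeout: DefaultIdleTimeout},
	}
	// Fails with: limits 'read' section validation error: 'tags' config
	// section validation error: invalid weights: ...
	fmt.Println(cfg.Validate())
}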
diff --git a/pkg/ape/chainbase/boltdb.go b/pkg/ape/chainbase/boltdb.go
new file mode 100644
index 0000000000..005b3bd84d
--- /dev/null
+++ b/pkg/ape/chainbase/boltdb.go
@@ -0,0 +1,329 @@
+package chainbase
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "go.etcd.io/bbolt"
+)
+
+type boltLocalOverrideStorage struct {
+ *cfg
+
+ db *bbolt.DB
+}
+
+var chainBucket = []byte{0}
+
+var (
+ // ErrRootBucketNotFound signals the database has not been properly initialized.
+ ErrRootBucketNotFound = logicerr.New("root bucket not found")
+
+ ErrGlobalNamespaceBucketNotFound = logicerr.New("global namespace bucket not found")
+
+ ErrTargetTypeBucketNotFound = logicerr.New("target type bucket not found")
+
+ ErrTargetNameBucketNotFound = logicerr.New("target name bucket not found")
+
+ ErrBucketNotContainsChainID = logicerr.New("chain id not found in bucket")
+
+ errChainIDIsNotSet = errors.New("chain ID is not set")
+)
+
+// NewBoltLocalOverrideDatabase returns storage wrapper for storing access policy engine
+// local overrides.
+//
+// chain storage (chainBucket):
+// -> global namespace bucket (nBucket):
+// --> target bucket (tBucket)
+// ---> target name (resource) bucket (rBucket):
+//
+// | Key | Value |
+// x---------------------x-------------------x
+// | chain id (string) | serialized chain |
+// x---------------------x-------------------x
+//
+//nolint:godot
+func NewBoltLocalOverrideDatabase(opts ...Option) LocalOverrideDatabase {
+ c := defaultCfg()
+
+ for i := range opts {
+ opts[i](c)
+ }
+
+ return &boltLocalOverrideStorage{
+ cfg: c,
+ }
+}
+
+func (cs *boltLocalOverrideStorage) Init() error {
+ return cs.db.Update(func(tx *bbolt.Tx) error {
+ _, err := tx.CreateBucketIfNotExists(chainBucket)
+ return err
+ })
+}
+
+func (cs *boltLocalOverrideStorage) Open(context.Context) error {
+ err := util.MkdirAllX(filepath.Dir(cs.path), cs.perm)
+ if err != nil {
+ return fmt.Errorf("can't create dir %s for the chain DB: %w", cs.path, err)
+ }
+
+ opts := *bbolt.DefaultOptions
+ opts.NoSync = cs.noSync
+ opts.Timeout = 100 * time.Millisecond
+
+ cs.db, err = bbolt.Open(cs.path, cs.perm, &opts)
+ if err != nil {
+ return fmt.Errorf("can't open the chain DB: %w", err)
+ }
+
+ cs.db.MaxBatchSize = cs.maxBatchSize
+ cs.db.MaxBatchDelay = cs.maxBatchDelay
+
+ return nil
+}
+
+func (cs *boltLocalOverrideStorage) Close() error {
+ var err error
+ if cs.db != nil {
+ err = cs.db.Close()
+ }
+ return err
+}
+
+func getTypeBucket(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) {
+ cbucket := tx.Bucket(chainBucket)
+ if cbucket == nil {
+ return nil, ErrRootBucketNotFound
+ }
+
+ nbucket := cbucket.Bucket([]byte(name))
+ if nbucket == nil {
+ return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name)
+ }
+ return nbucket.Bucket([]byte{byte(target.Type)}), nil
+}
+
+func normalizeTargetName(target *policyengine.Target) {
+ if target.Type == policyengine.Namespace && target.Name == "" {
+ target.Name = "root"
+ }
+}
+
+func getTargetBucket(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) {
+ typeBucket, err := getTypeBucket(tx, name, target)
+ if err != nil {
+ return nil, err
+ }
+ if typeBucket == nil {
+ return nil, fmt.Errorf("%w: %w: %c", policyengine.ErrChainNotFound, ErrTargetTypeBucketNotFound, target.Type)
+ }
+
+ normalizeTargetName(&target)
+ rbucket := typeBucket.Bucket([]byte(target.Name))
+ if rbucket == nil {
+ return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrTargetNameBucketNotFound, target.Name)
+ }
+ return rbucket, nil
+}
+
+func getTargetBucketCreateIfEmpty(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) {
+ cbucket := tx.Bucket(chainBucket)
+ if cbucket == nil {
+ return nil, ErrRootBucketNotFound
+ }
+
+ nbucket := cbucket.Bucket([]byte(name))
+ if nbucket == nil {
+ var err error
+ nbucket, err = cbucket.CreateBucket([]byte(name))
+ if err != nil {
+ return nil, fmt.Errorf("could not create a bucket for the global chain name %s: %w", name, err)
+ }
+ }
+
+ typeBucket := nbucket.Bucket([]byte{byte(target.Type)})
+ if typeBucket == nil {
+ var err error
+ typeBucket, err = nbucket.CreateBucket([]byte{byte(target.Type)})
+ if err != nil {
+ return nil, fmt.Errorf("could not create a bucket for the target type '%c': %w", target.Type, err)
+ }
+ }
+
+ normalizeTargetName(&target)
+ rbucket := typeBucket.Bucket([]byte(target.Name))
+ if rbucket == nil {
+ var err error
+ rbucket, err = typeBucket.CreateBucket([]byte(target.Name))
+ if err != nil {
+ return nil, fmt.Errorf("could not create a bucket for the target name %s: %w", target.Name, err)
+ }
+ }
+
+ return rbucket, nil
+}
+
+func (cs *boltLocalOverrideStorage) AddOverride(name chain.Name, target policyengine.Target, c *chain.Chain) (chain.ID, error) {
+ if len(c.ID) == 0 {
+ return chain.ID{}, errChainIDIsNotSet
+ }
+
+ serializedChain := c.Bytes()
+
+ err := cs.db.Update(func(tx *bbolt.Tx) error {
+ rbuck, err := getTargetBucketCreateIfEmpty(tx, name, target)
+ if err != nil {
+ return err
+ }
+ return rbuck.Put([]byte(c.ID), serializedChain)
+ })
+
+ return c.ID, err
+}
+
+func (cs *boltLocalOverrideStorage) GetOverride(name chain.Name, target policyengine.Target, chainID chain.ID) (*chain.Chain, error) {
+ var serializedChain []byte
+
+ if err := cs.db.View(func(tx *bbolt.Tx) error {
+ rbuck, err := getTargetBucket(tx, name, target)
+ if err != nil {
+ return err
+ }
+ serializedChain = rbuck.Get([]byte(chainID))
+ if serializedChain == nil {
+ return fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrBucketNotContainsChainID, chainID)
+ }
+ serializedChain = bytes.Clone(serializedChain)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ c := &chain.Chain{}
+ if err := c.DecodeBytes(serializedChain); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+func (cs *boltLocalOverrideStorage) RemoveOverride(name chain.Name, target policyengine.Target, chainID chain.ID) error {
+ return cs.db.Update(func(tx *bbolt.Tx) error {
+ rbuck, err := getTargetBucket(tx, name, target)
+ if err != nil {
+ return err
+ }
+ return rbuck.Delete([]byte(chainID))
+ })
+}
+
+func (cs *boltLocalOverrideStorage) RemoveOverridesByTarget(name chain.Name, target policyengine.Target) error {
+ return cs.db.Update(func(tx *bbolt.Tx) error {
+ typeBucket, err := getTypeBucket(tx, name, target)
+ if err != nil {
+ return err
+ }
+ normalizeTargetName(&target)
+ return typeBucket.DeleteBucket([]byte(target.Name))
+ })
+}
+
+func (cs *boltLocalOverrideStorage) ListOverrides(name chain.Name, target policyengine.Target) ([]*chain.Chain, error) {
+ var serializedChains [][]byte
+ var serializedChain []byte
+ if err := cs.db.View(func(tx *bbolt.Tx) error {
+ rbuck, err := getTargetBucket(tx, name, target)
+ if err != nil {
+ return err
+ }
+ return rbuck.ForEach(func(_, v []byte) error {
+ serializedChain = bytes.Clone(v)
+ serializedChains = append(serializedChains, serializedChain)
+ return nil
+ })
+ }); err != nil {
+ if errors.Is(err, policyengine.ErrChainNotFound) {
+ return []*chain.Chain{}, nil
+ }
+ return nil, err
+ }
+ chains := make([]*chain.Chain, 0, len(serializedChains))
+ for _, serializedChain = range serializedChains {
+ c := &chain.Chain{}
+ if err := c.DecodeBytes(serializedChain); err != nil {
+ return nil, err
+ }
+ chains = append(chains, c)
+ }
+ return chains, nil
+}
+
+func (cs *boltLocalOverrideStorage) DropAllOverrides(name chain.Name) error {
+ return cs.db.Update(func(tx *bbolt.Tx) error {
+ cbucket := tx.Bucket(chainBucket)
+ if cbucket == nil {
+ return ErrRootBucketNotFound
+ }
+
+ nbucket := cbucket.Bucket([]byte(name))
+ if nbucket == nil {
+ return fmt.Errorf("%w: %w: global namespace %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name)
+ }
+
+		return cbucket.DeleteBucket([]byte(name))
+ })
+}
+
+func (cs *boltLocalOverrideStorage) ListOverrideDefinedTargets(name chain.Name) ([]policyengine.Target, error) {
+ var targets []policyengine.Target
+ if err := cs.db.View(func(tx *bbolt.Tx) error {
+ var err error
+ targets, err = getTargets(tx, name)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return targets, nil
+}
+
+func getTargets(tx *bbolt.Tx, name chain.Name) ([]policyengine.Target, error) {
+ var targets []policyengine.Target
+ cbucket := tx.Bucket(chainBucket)
+ if cbucket == nil {
+ return nil, ErrRootBucketNotFound
+ }
+
+ nbucket := cbucket.Bucket([]byte(name))
+ if nbucket == nil {
+ return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name)
+ }
+
+ if err := nbucket.ForEachBucket(func(k []byte) error {
+ ttype := policyengine.TargetType(k[0])
+ if err := nbucket.Bucket(k).ForEachBucket(func(k []byte) error {
+ targets = append(targets, policyengine.Target{
+ Type: ttype,
+ Name: string(bytes.Clone(k)),
+ })
+ return nil
+ }); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return targets, nil
+}
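
A minimal lifecycle sketch with an assumed path and chain ID: Open creates the database file, Init creates the root bucket, and overrides are then stored per chain name, target, and chain ID:

package main

import (
	"context"
	"log"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
)

func main() {
	db := chainbase.NewBoltLocalOverrideDatabase(chainbase.WithPath("/tmp/overrides.db"))
	if err := db.Open(context.Background()); err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Init(); err != nil { // creates the root chain bucket
		log.Fatal(err)
	}

	c := &chain.Chain{ID: chain.ID("example")} // the ID must be non-empty
	// The empty namespace name is normalized to "root" inside the storage.
	if _, err := db.AddOverride(chain.Ingress, policyengine.NamespaceTarget(""), c); err != nil {
		log.Fatal(err)
	}
}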
diff --git a/pkg/ape/chainbase/inmemory.go b/pkg/ape/chainbase/inmemory.go
new file mode 100644
index 0000000000..27712d9598
--- /dev/null
+++ b/pkg/ape/chainbase/inmemory.go
@@ -0,0 +1,30 @@
+package chainbase
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+)
+
+type inmemoryLocalOverrideStorage struct {
+ engine.LocalOverrideStorage
+}
+
+func NewInmemoryLocalOverrideDatabase() LocalOverrideDatabase {
+ return &inmemoryLocalOverrideStorage{
+ LocalOverrideStorage: inmemory.NewInmemoryLocalStorage(),
+ }
+}
+
+func (cs *inmemoryLocalOverrideStorage) Init() error {
+ return nil
+}
+
+func (cs *inmemoryLocalOverrideStorage) Open(_ context.Context) error {
+ return nil
+}
+
+func (cs *inmemoryLocalOverrideStorage) Close() error {
+ return nil
+}
diff --git a/pkg/ape/chainbase/interface.go b/pkg/ape/chainbase/interface.go
new file mode 100644
index 0000000000..ee445f22cc
--- /dev/null
+++ b/pkg/ape/chainbase/interface.go
@@ -0,0 +1,22 @@
+package chainbase
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+)
+
+// DatabaseCore interface provides methods to initialize and manage local override storage
+// as database.
+type DatabaseCore interface {
+ Init() error
+ Open(context.Context) error
+ Close() error
+}
+
+// LocalOverrideDatabase interface provides methods to manage local override storage
+// as database and as the APE's local override storage.
+type LocalOverrideDatabase interface {
+ DatabaseCore
+ engine.LocalOverrideStorage
+}
diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go
new file mode 100644
index 0000000000..590b7a885c
--- /dev/null
+++ b/pkg/ape/chainbase/option.go
@@ -0,0 +1,57 @@
+package chainbase
+
+import (
+ "io/fs"
+ "os"
+ "time"
+
+ "go.etcd.io/bbolt"
+)
+
+type Option func(*cfg)
+
+type cfg struct {
+ path string
+ perm fs.FileMode
+ noSync bool
+ maxBatchDelay time.Duration
+ maxBatchSize int
+}
+
+func defaultCfg() *cfg {
+ return &cfg{
+ perm: os.ModePerm,
+ maxBatchDelay: bbolt.DefaultMaxBatchDelay,
+ maxBatchSize: bbolt.DefaultMaxBatchSize,
+ }
+}
+
+func WithPath(path string) Option {
+ return func(c *cfg) {
+ c.path = path
+ }
+}
+
+func WithPerm(perm fs.FileMode) Option {
+ return func(c *cfg) {
+ c.perm = perm
+ }
+}
+
+func WithNoSync(noSync bool) Option {
+ return func(c *cfg) {
+ c.noSync = noSync
+ }
+}
+
+func WithMaxBatchDelay(maxBatchDelay time.Duration) Option {
+ return func(c *cfg) {
+ c.maxBatchDelay = maxBatchDelay
+ }
+}
+
+func WithMaxBatchSize(maxBatchSize int) Option {
+ return func(c *cfg) {
+ c.maxBatchSize = maxBatchSize
+ }
+}
diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go
new file mode 100644
index 0000000000..8cbb1cce98
--- /dev/null
+++ b/pkg/ape/contract_storage/proxy.go
@@ -0,0 +1,126 @@
+package contractstorage
+
+import (
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ policy_morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+)
+
+type ProxyAdaptedContractStorage interface {
+ AddMorphRuleChain(name chain.Name, target engine.Target, c *chain.Chain) (util.Uint256, uint32, error)
+
+ RemoveMorphRuleChain(name chain.Name, target engine.Target, chainID chain.ID) (util.Uint256, uint32, error)
+
+ ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error)
+}
+
+var _ ProxyAdaptedContractStorage = (engine.MorphRuleChainStorage)(nil)
+
+type RPCActorProvider interface {
+ GetRPCActor() actor.RPCActor
+}
+
+// ProxyVerificationContractStorage is a MorphRuleChainStorage decorated with an actor whose cosigner is the Proxy contract.
+type ProxyVerificationContractStorage struct {
+ rpcActorProvider RPCActorProvider
+
+ cosigners []actor.SignerAccount
+
+ policyScriptHash util.Uint160
+}
+
+var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
+
+func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
+ acc := wallet.NewAccountFromPrivateKey(key)
+ return &ProxyVerificationContractStorage{
+ rpcActorProvider: rpcActorProvider,
+
+ cosigners: []actor.SignerAccount{
+ {
+ Signer: transaction.Signer{
+ Account: proxyScriptHash,
+ Scopes: transaction.CustomContracts,
+ AllowedContracts: []util.Uint160{policyScriptHash},
+ },
+ Account: notary.FakeContractAccount(proxyScriptHash),
+ },
+ {
+ Signer: transaction.Signer{
+ Account: acc.Contract.ScriptHash(),
+ Scopes: transaction.CalledByEntry,
+ },
+ Account: acc,
+ },
+ },
+
+ policyScriptHash: policyScriptHash,
+ }
+}
+
+// contractStorageActorAdapter adapts *actor.Actor to the policy_morph.ContractStorageActor interface.
+type contractStorageActorAdapter struct {
+ *actor.Actor
+ rpcActor invoker.RPCInvoke
+}
+
+func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
+ return n.rpcActor
+}
+
+func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
+ rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
+ act, err := actor.New(rpcActor, contractStorage.cosigners)
+ if err != nil {
+ return nil, err
+ }
+ return &contractStorageActorAdapter{
+ Actor: act,
+ rpcActor: rpcActor,
+ }, nil
+}
+
+// AddMorphRuleChain adds a morph rule chain to the Policy contract using both the Proxy contract and the storage account as cosigners.
+func (contractStorage *ProxyVerificationContractStorage) AddMorphRuleChain(name chain.Name, target engine.Target, c *chain.Chain) (util.Uint256, uint32, error) {
+	// The contract storage actor is reconstructed on every method invocation: the RPCActor's
+	// (essentially a WSClient's) connection may become invalid, and ProxyVerificationContractStorage does not manage reconnections.
+ contractStorageActor, err := contractStorage.newContractStorageActor()
+ if err != nil {
+ return util.Uint256{}, 0, err
+ }
+ return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).AddMorphRuleChain(name, target, c)
+}
+
+// RemoveMorphRuleChain removes a morph rule chain from the Policy contract using both the Proxy contract and the storage account as cosigners.
+func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(name chain.Name, target engine.Target, chainID chain.ID) (util.Uint256, uint32, error) {
+	// The contract storage actor is reconstructed on every method invocation: the RPCActor's
+	// (essentially a WSClient's) connection may become invalid, and ProxyVerificationContractStorage does not manage reconnections.
+ contractStorageActor, err := contractStorage.newContractStorageActor()
+ if err != nil {
+ return util.Uint256{}, 0, err
+ }
+ return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).RemoveMorphRuleChain(name, target, chainID)
+}
+
+// ListMorphRuleChains lists morph rule chains from the Policy contract; the call is read-only and uses a plain invoker rather than the cosigning actor.
+func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
+ rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
+ inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
+ return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
+}
+
+type invokerAdapter struct {
+ *invoker.Invoker
+ rpcInvoker invoker.RPCInvoke
+}
+
+func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
+ return n.rpcInvoker
+}
diff --git a/pkg/ape/converter/converter.go b/pkg/ape/converter/converter.go
new file mode 100644
index 0000000000..9032680af6
--- /dev/null
+++ b/pkg/ape/converter/converter.go
@@ -0,0 +1,44 @@
+package converter
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+)
+
+func SchemaRoleFromACLRole(role acl.Role) (string, error) {
+ switch role {
+ case acl.RoleOwner:
+ return nativeschema.PropertyValueContainerRoleOwner, nil
+ case acl.RoleContainer:
+ return nativeschema.PropertyValueContainerRoleContainer, nil
+ case acl.RoleInnerRing:
+ return nativeschema.PropertyValueContainerRoleIR, nil
+ case acl.RoleOthers:
+ return nativeschema.PropertyValueContainerRoleOthers, nil
+ default:
+ return "", fmt.Errorf("failed to convert %s", role.String())
+ }
+}
+
+func SchemaMethodFromACLOperation(op acl.Op) (string, error) {
+ switch op {
+ case acl.OpObjectGet:
+ return nativeschema.MethodGetObject, nil
+ case acl.OpObjectHead:
+ return nativeschema.MethodHeadObject, nil
+ case acl.OpObjectPut:
+ return nativeschema.MethodPutObject, nil
+ case acl.OpObjectDelete:
+ return nativeschema.MethodDeleteObject, nil
+ case acl.OpObjectSearch:
+ return nativeschema.MethodSearchObject, nil
+ case acl.OpObjectRange:
+ return nativeschema.MethodRangeObject, nil
+ case acl.OpObjectHash:
+ return nativeschema.MethodHashObject, nil
+ default:
+ return "", fmt.Errorf("operation cannot be converted: %d", op)
+ }
+}
diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go
new file mode 100644
index 0000000000..d32bd4a07e
--- /dev/null
+++ b/pkg/ape/request/frostfsid.go
@@ -0,0 +1,53 @@
+package request
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// FormFrostfsIDRequestProperties forms frostfsid-specific request properties, such as user-claim tags and group ID.
+func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
+ reqProps := make(map[string]string)
+ subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
+ if err != nil {
+ if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
+ return nil, fmt.Errorf("get subject error: %w", err)
+ }
+ return reqProps, nil
+ }
+ for k, v := range subj.KV {
+ propertyKey := fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, k)
+ reqProps[propertyKey] = v
+ }
+
+ groups := make([]string, len(subj.Groups))
+ for i, group := range subj.Groups {
+ groups[i] = strconv.FormatInt(group.ID, 10)
+ }
+ reqProps[commonschema.PropertyKeyFrostFSIDGroupID] = apechain.FormCondSliceContainsValue(groups)
+
+ return reqProps, nil
+}
+
+// Groups returns the actor's group IDs from the frostfsid contract.
+func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
+ subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
+ if err != nil {
+ if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
+ return nil, fmt.Errorf("get subject error: %w", err)
+ }
+ return []string{}, nil
+ }
+ groups := make([]string, len(subj.Groups))
+ for i, group := range subj.Groups {
+ groups[i] = strconv.FormatInt(group.ID, 10)
+ }
+ return groups, nil
+}
diff --git a/pkg/ape/request/request.go b/pkg/ape/request/request.go
new file mode 100644
index 0000000000..de67dea235
--- /dev/null
+++ b/pkg/ape/request/request.go
@@ -0,0 +1,55 @@
+package request
+
+import (
+ aperesource "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
+)
+
+type Request struct {
+ operation string
+ resource Resource
+ properties map[string]string
+}
+
+func NewRequest(operation string, resource Resource, properties map[string]string) Request {
+ return Request{
+ operation: operation,
+ resource: resource,
+ properties: properties,
+ }
+}
+
+var _ aperesource.Request = Request{}
+
+func (r Request) Operation() string {
+ return r.operation
+}
+
+func (r Request) Property(key string) string {
+ return r.properties[key]
+}
+
+func (r Request) Resource() aperesource.Resource {
+ return r.resource
+}
+
+type Resource struct {
+ name string
+ properties map[string]string
+}
+
+var _ aperesource.Resource = Resource{}
+
+func NewResource(name string, properties map[string]string) Resource {
+ return Resource{
+ name: name,
+ properties: properties,
+ }
+}
+
+func (r Resource) Name() string {
+ return r.name
+}
+
+func (r Resource) Property(key string) string {
+ return r.properties[key]
+}
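
A minimal construction sketch with an assumed container ID, showing how Request and Resource satisfy the policy engine's resource.Request interface:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)

func main() {
	res := request.NewResource(
		fmt.Sprintf(nativeschema.ResourceFormatRootContainer, "67ETTZzbzJC6WxdQhHHHsJNCttVMBqYrSoFaUFVDNfiX"),
		map[string]string{},
	)
	req := request.NewRequest(nativeschema.MethodPutObject, res, map[string]string{"SourceIP": "10.122.1.20"})
	fmt.Println(req.Operation(), req.Resource().Name(), req.Property("SourceIP"))
}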
diff --git a/pkg/ape/router/bearer_overrides.go b/pkg/ape/router/bearer_overrides.go
new file mode 100644
index 0000000000..2bc8ad614d
--- /dev/null
+++ b/pkg/ape/router/bearer_overrides.go
@@ -0,0 +1,94 @@
+package router
+
+import (
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+)
+
+func newTarget(ct ape.ChainTarget) (policyengine.Target, error) {
+ var target policyengine.Target
+ switch ct.TargetType {
+ case ape.TargetTypeContainer:
+ var cid cidSDK.ID
+ err := cid.DecodeString(ct.Name)
+ if err != nil {
+			return target, fmt.Errorf("invalid cid format: %s", ct.Name)
+ }
+ target.Type = policyengine.Container
+ case ape.TargetTypeGroup:
+ target.Type = policyengine.Group
+ case ape.TargetTypeNamespace:
+ target.Type = policyengine.Namespace
+ case ape.TargetTypeUser:
+ target.Type = policyengine.User
+ default:
+ return target, fmt.Errorf("unsupported target type: %v", ct.TargetType)
+ }
+ target.Name = ct.Name
+ return target, nil
+}
+
+type morphReaderDecorator struct {
+ policyengine.MorphRuleChainStorageReader
+
+ bearerTokenTarget policyengine.Target
+
+ bearerTokenChains []*chain.Chain
+}
+
+func newMorphReaderDecorator(r policyengine.MorphRuleChainStorageReader, override bearer.APEOverride) (*morphReaderDecorator, error) {
+ if r == nil {
+ return nil, errors.New("empty morph chain rule reader")
+ }
+ t, err := newTarget(override.Target)
+ if err != nil {
+ return nil, err
+ }
+
+ bearerTokenChains := make([]*chain.Chain, len(override.Chains))
+ for i := range override.Chains {
+ chain := new(chain.Chain)
+ if err := chain.DecodeBytes(override.Chains[i].Raw); err != nil {
+ return nil, fmt.Errorf("invalid ape chain: %w", err)
+ }
+ bearerTokenChains[i] = chain
+ }
+
+ return &morphReaderDecorator{
+ MorphRuleChainStorageReader: r,
+ bearerTokenTarget: t,
+ bearerTokenChains: bearerTokenChains,
+ }, nil
+}
+
+func (m *morphReaderDecorator) ListMorphRuleChains(name chain.Name, target policyengine.Target) ([]*chain.Chain, error) {
+ if len(m.bearerTokenChains) > 0 && m.bearerTokenTarget.Type == target.Type {
+ if m.bearerTokenTarget.Name != target.Name {
+ return nil, fmt.Errorf("unexpected bearer token target: %s", m.bearerTokenTarget.Name)
+ }
+ return m.bearerTokenChains, nil
+ }
+ return m.MorphRuleChainStorageReader.ListMorphRuleChains(name, target)
+}
+
+// BearerChainFeedRouter creates a chain router that feeds bearer token rule chains into the evaluation.
+// Bearer token chains override only container-target chains within the Policy contract, so the order of checking
+// is as follows:
+//
+// 1. Local overrides;
+// 2. Policy contract chains for a namespace target (as namespace chains have higher priority);
+// 3. Bearer token chains for a container target - if they're not defined, then it checks Policy contract chains;
+// 4. Checks for the remaining targets.
+func BearerChainFeedRouter(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, override bearer.APEOverride) (policyengine.ChainRouter, error) {
+ mr, err := newMorphReaderDecorator(morphChainStorage, override)
+ if err != nil {
+ return nil, fmt.Errorf("create morph reader with bearer override error: %w", err)
+ }
+ return policyengine.NewDefaultChainRouterWithLocalOverrides(mr, localOverrideStorage), nil
+}
diff --git a/pkg/ape/router/bearer_overrides_test.go b/pkg/ape/router/bearer_overrides_test.go
new file mode 100644
index 0000000000..3c12ee6fa2
--- /dev/null
+++ b/pkg/ape/router/bearer_overrides_test.go
@@ -0,0 +1,165 @@
+package router_test
+
+import (
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
+ apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ resourcetest "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource/testutil"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ container = "67ETTZzbzJC6WxdQhHHHsJNCttVMBqYrSoFaUFVDNfiX"
+ rootNs = ""
+)
+
+var (
+ allowBySourceIP = &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: chain.Resources{Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)}},
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: "SourceIP",
+ Value: "10.122.1.20",
+ },
+ },
+ },
+ },
+ }
+
+ denyBySourceIP = &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: chain.Resources{Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)}},
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: "SourceIP",
+ Value: "10.122.1.20",
+ },
+ },
+ },
+ },
+ }
+)
+
+func TestBearerChainFeedRouter(t *testing.T) {
+ t.Run("no bearer token overrides", func(t *testing.T) {
+ inmem := inmemory.NewInMemoryLocalOverrides()
+
+ inmem.LocalStorage().AddOverride(chain.Ingress, engine.ContainerTarget(container), denyBySourceIP)
+ inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP)
+
+ _, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bearerSDK.APEOverride{})
+ require.Error(t, err)
+ })
+ t.Run("allow by container with deny by bearer overrides", func(t *testing.T) {
+ inmem := inmemory.NewInMemoryLocalOverrides()
+
+ inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP)
+
+ bt := bearerSDK.APEOverride{
+ Target: apeSDK.ChainTarget{
+ TargetType: apeSDK.TargetTypeContainer,
+ Name: container,
+ },
+ Chains: []apeSDK.Chain{{
+ Raw: denyBySourceIP.Bytes(),
+ }},
+ }
+
+ r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt)
+ require.NoError(t, err)
+
+ req := resourcetest.NewRequest(nativeschema.MethodPutObject,
+ resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}),
+ map[string]string{
+ "SourceIP": "10.122.1.20",
+ "Actor": "someOwner",
+ },
+ )
+
+ st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req)
+ require.NoError(t, err)
+ require.True(t, found)
+		require.Equal(t, chain.AccessDenied, st)
+ })
+ t.Run("allow by namespace with deny by bearer overrides", func(t *testing.T) {
+ inmem := inmemory.NewInMemoryLocalOverrides()
+
+ inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP)
+ inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(rootNs), allowBySourceIP)
+
+ bt := bearerSDK.APEOverride{
+ Target: apeSDK.ChainTarget{
+ TargetType: apeSDK.TargetTypeContainer,
+ Name: container,
+ },
+ Chains: []apeSDK.Chain{{
+ Raw: denyBySourceIP.Bytes(),
+ }},
+ }
+
+ r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt)
+ require.NoError(t, err)
+
+ req := resourcetest.NewRequest(nativeschema.MethodPutObject,
+ resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}),
+ map[string]string{
+ "SourceIP": "10.122.1.20",
+ "Actor": "someOwner",
+ },
+ )
+
+ st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req)
+ require.NoError(t, err)
+ require.True(t, found)
+		require.Equal(t, chain.AccessDenied, st)
+ })
+ t.Run("deny by namespace with allow by bearer overrides", func(t *testing.T) {
+ inmem := inmemory.NewInMemoryLocalOverrides()
+
+ inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(rootNs), denyBySourceIP)
+
+ bt := bearerSDK.APEOverride{
+ Target: apeSDK.ChainTarget{
+ TargetType: apeSDK.TargetTypeContainer,
+ Name: container,
+ },
+ Chains: []apeSDK.Chain{{
+ Raw: allowBySourceIP.Bytes(),
+ }},
+ }
+
+ r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt)
+ require.NoError(t, err)
+
+ req := resourcetest.NewRequest(nativeschema.MethodPutObject,
+ resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}),
+ map[string]string{
+ "SourceIP": "10.122.1.20",
+ "Actor": "someOwner",
+ },
+ )
+
+ st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req)
+ require.NoError(t, err)
+ require.True(t, found)
+ require.Equal(t, chain.AccessDenied, st)
+ })
+}
diff --git a/pkg/ape/router/single_pass.go b/pkg/ape/router/single_pass.go
new file mode 100644
index 0000000000..ec9244baee
--- /dev/null
+++ b/pkg/ape/router/single_pass.go
@@ -0,0 +1,30 @@
+package router
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+)
+
+// SingleUseRouterWithBearerTokenChains creates a chain router backed by an
+// in-memory storage implementation and fed with the APE chains defined in the
+// bearer token overrides.
+func SingleUseRouterWithBearerTokenChains(overrides []bearer.APEOverride) (engine.ChainRouter, error) {
+ storage := inmemory.NewInmemoryMorphRuleChainStorage()
+ for _, override := range overrides {
+ target, err := newTarget(override.Target)
+ if err != nil {
+ return nil, err
+ }
+ for i := range override.Chains {
+ chain := new(apechain.Chain)
+ if err := chain.DecodeBytes(override.Chains[i].Raw); err != nil {
+ return nil, fmt.Errorf("invalid ape chain: %w", err)
+ }
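+ // The returned values are deliberately discarded here: only the side
+ // effect of staging the chain in the in-memory storage is needed.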
+ _, _, _ = storage.AddMorphRuleChain(apechain.Ingress, target, chain)
+ }
+ }
+ return engine.NewDefaultChainRouter(storage), nil
+}
diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go
index d74adddcc3..98bdf99e78 100644
--- a/pkg/core/client/client.go
+++ b/pkg/core/client/client.go
@@ -3,24 +3,22 @@ package client
import (
"context"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
// Client is an interface of FrostFS storage
// node's client.
type Client interface {
- ContainerAnnounceUsedSpace(context.Context, client.PrmAnnounceSpace) (*client.ResAnnounceSpace, error)
- ObjectPutInit(context.Context, client.PrmObjectPutInit) (*client.ObjectWriter, error)
+ ObjectPutInit(context.Context, client.PrmObjectPutInit) (client.ObjectWriter, error)
+ ObjectPutSingle(context.Context, client.PrmObjectPutSingle) (*client.ResObjectPutSingle, error)
ObjectDelete(context.Context, client.PrmObjectDelete) (*client.ResObjectDelete, error)
ObjectGetInit(context.Context, client.PrmObjectGet) (*client.ObjectReader, error)
ObjectHead(context.Context, client.PrmObjectHead) (*client.ResObjectHead, error)
ObjectSearchInit(context.Context, client.PrmObjectSearch) (*client.ObjectListReader, error)
ObjectRangeInit(context.Context, client.PrmObjectRange) (*client.ObjectRangeReader, error)
ObjectHash(context.Context, client.PrmObjectHash) (*client.ResObjectHash, error)
- AnnounceLocalTrust(context.Context, client.PrmAnnounceLocalTrust) (*client.ResAnnounceLocalTrust, error)
- AnnounceIntermediateTrust(context.Context, client.PrmAnnounceIntermediateTrust) (*client.ResAnnounceIntermediateTrust, error)
ExecRaw(f func(client *rawclient.Client) error) error
Close() error
}
@@ -32,7 +30,7 @@ type MultiAddressClient interface {
// RawForAddress must return rawclient.Client
// for the passed network.Address.
- RawForAddress(network.Address, func(cli *rawclient.Client) error) error
+ RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error
ReportError(error)
}
diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go
index 80c8f49b7f..91ee5c6c3c 100644
--- a/pkg/core/client/util.go
+++ b/pkg/core/client/util.go
@@ -3,6 +3,7 @@ package client
import (
"bytes"
"fmt"
+ "iter"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -19,10 +20,11 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro
// Args must not be nil.
func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface {
PublicKey() []byte
- IterateAddresses(func(string) bool)
+ Addresses() iter.Seq[string]
NumberOfAddresses() int
ExternalAddresses() []string
-}) error {
+},
+) error {
var a network.AddressGroup
err := a.FromIterator(info)
@@ -49,7 +51,8 @@ func NodeInfoFromNetmapElement(dst *NodeInfo, info interface {
PublicKey() []byte
Addresses() network.AddressGroup
ExternalAddresses() network.AddressGroup
-}) {
+},
+) {
nodeInfoFromKeyAddr(dst, info.PublicKey(), info.Addresses(), info.ExternalAddresses())
}
diff --git a/pkg/core/container/delete.go b/pkg/core/container/delete.go
index e3379446f7..8c14bdf5e4 100644
--- a/pkg/core/container/delete.go
+++ b/pkg/core/container/delete.go
@@ -1,6 +1,7 @@
package container
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
@@ -8,43 +9,14 @@ import (
// RemovalWitness groups the information required
// to prove and verify the removal of a container.
type RemovalWitness struct {
- cnr cid.ID
+ // ContainerID is the identifier of the container
+ // to be removed.
+ ContainerID cid.ID
- sig []byte
+ // Signature is the signature of the container identifier.
+ Signature *refs.Signature
- token *session.Container
-}
-
-// ContainerID returns the identifier of the container
-// to be removed.
-func (x RemovalWitness) ContainerID() cid.ID {
- return x.cnr
-}
-
-// SetContainerID sets the identifier of the container
-// to be removed.
-func (x *RemovalWitness) SetContainerID(id cid.ID) {
- x.cnr = id
-}
-
-// Signature returns the signature of the container identifier.
-func (x RemovalWitness) Signature() []byte {
- return x.sig
-}
-
-// SetSignature sets a signature of the container identifier.
-func (x *RemovalWitness) SetSignature(sig []byte) {
- x.sig = sig
-}
-
-// SessionToken returns the token of the session within
-// which the container was removed.
-func (x RemovalWitness) SessionToken() *session.Container {
- return x.token
-}
-
-// SetSessionToken sets the token of the session within
-// which the container was removed.
-func (x *RemovalWitness) SetSessionToken(tok *session.Container) {
- x.token = tok
+ // SessionToken is the token of the session within
+ // which the container was removed.
+ SessionToken *session.Container
}
diff --git a/pkg/core/container/ec.go b/pkg/core/container/ec.go
new file mode 100644
index 0000000000..1acb87f2b9
--- /dev/null
+++ b/pkg/core/container/ec.go
@@ -0,0 +1,11 @@
+package container
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+)
+
+// IsECContainer returns true if the container has an erasure coding placement policy.
+func IsECContainer(cnr containerSDK.Container) bool {
+ return policy.IsECPlacement(cnr.PlacementPolicy())
+}
diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go
new file mode 100644
index 0000000000..1c52d93e74
--- /dev/null
+++ b/pkg/core/container/info.go
@@ -0,0 +1,104 @@
+package container
+
+import (
+ "context"
+ "sync"
+
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
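+// Info groups the metadata flags of a container: whether its attributes
+// should be indexed and whether the container has been removed.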
+type Info struct {
+ Indexed bool
+ Removed bool
+}
+
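+// infoValue caches a resolved Info together with the error it was resolved with.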
+type infoValue struct {
+ info Info
+ err error
+}
+
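+// InfoProvider resolves Info about a container by its identifier.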
+type InfoProvider interface {
+ Info(ctx context.Context, id cid.ID) (Info, error)
+}
+
+type infoProvider struct {
+ mtx *sync.RWMutex
+ cache map[cid.ID]infoValue
+ kl *utilSync.KeyLocker[cid.ID]
+
+ source Source
+ sourceErr error
+ sourceOnce *sync.Once
+ sourceFactory func() (Source, error)
+}
+
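+// NewInfoProvider returns an InfoProvider that constructs its container
+// source lazily via sourceFactory and caches lookup results, including errors.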
+func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
+ return &infoProvider{
+ mtx: &sync.RWMutex{},
+ cache: make(map[cid.ID]infoValue),
+ sourceOnce: &sync.Once{},
+ kl: utilSync.NewKeyLocker[cid.ID](),
+ sourceFactory: sourceFactory,
+ }
+}
+
+func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) {
+ v, found := r.tryGetFromCache(id)
+ if found {
+ return v.info, v.err
+ }
+
+ return r.getFromSource(ctx, id)
+}
+
+func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ value, found := r.cache[id]
+ return value, found
+}
+
+func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) {
+ r.kl.Lock(id)
+ defer r.kl.Unlock(id)
+
+ if v, ok := r.tryGetFromCache(id); ok {
+ return v.info, v.err
+ }
+
+ r.sourceOnce.Do(func() {
+ r.source, r.sourceErr = r.sourceFactory()
+ })
+ if r.sourceErr != nil {
+ return Info{}, r.sourceErr
+ }
+
+ cnr, err := r.source.Get(ctx, id)
+ var civ infoValue
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ removed, err := WasRemoved(ctx, r.source, id)
+ if err != nil {
+ civ.err = err
+ } else {
+ civ.info.Removed = removed
+ }
+ } else {
+ civ.err = err
+ }
+ } else {
+ civ.info.Indexed = IsIndexedContainer(cnr.Value)
+ }
+ r.putToCache(id, civ)
+ return civ.info, civ.err
+}
+
+func (r *infoProvider) putToCache(id cid.ID, ct infoValue) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ r.cache[id] = ct
+}
diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go
index 3ed54529ca..4eb14e53c5 100644
--- a/pkg/core/container/storage.go
+++ b/pkg/core/container/storage.go
@@ -1,14 +1,14 @@
package container
import (
- "errors"
+ "context"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
// Container groups information about the FrostFS container stored in the FrostFS network.
@@ -23,6 +23,15 @@ type Container struct {
Session *session.Container
}
+// DelInfo contains info about a removed container.
+type DelInfo struct {
+ // Container owner.
+ Owner user.ID
+
+ // Epoch indicates when the container was removed.
+ Epoch uint64
+}
+
// Source is an interface that wraps
// basic container receiving method.
type Source interface {
@@ -34,13 +43,9 @@ type Source interface {
//
// Implementations must not retain the container pointer and modify
// the container through it.
- Get(cid.ID) (*Container, error)
-}
+ Get(ctx context.Context, cid cid.ID) (*Container, error)
-// IsErrNotFound checks if the error returned by Source.Get corresponds
-// to the missing container.
-func IsErrNotFound(err error) bool {
- return errors.As(err, new(apistatus.ContainerNotFound))
+ DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error)
}
// EACL groups information about the FrostFS container's extended ACL stored in
@@ -55,16 +60,3 @@ type EACL struct {
// Session within which Value was set. Nil means session absence.
Session *session.Container
}
-
-// EACLSource is the interface that wraps
-// basic methods of extended ACL table source.
-type EACLSource interface {
- // GetEACL reads the table from the source by identifier.
- // It returns any error encountered.
- //
- // GetEACL must return exactly one non-nil value.
- //
- // Must return apistatus.ErrEACLNotFound if requested
- // eACL table is not in source.
- GetEACL(cid.ID) (*EACL, error)
-}
diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go
new file mode 100644
index 0000000000..61c568052e
--- /dev/null
+++ b/pkg/core/container/util.go
@@ -0,0 +1,35 @@
+package container
+
+import (
+ "context"
+ "errors"
+
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+// WasRemoved checks whether the container was removed at some point or
+// simply has not been created yet at the current epoch.
+func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) {
+ _, err := s.DeletionInfo(ctx, cid)
+ if err == nil {
+ return true, nil
+ }
+ var errContainerNotFound *apistatus.ContainerNotFound
+ if errors.As(err, &errContainerNotFound) {
+ return false, nil
+ }
+ return false, err
+}
+
+// IsIndexedContainer returns true if the container's attributes should be indexed.
+func IsIndexedContainer(cnr containerSDK.Container) bool {
+ for key := range cnr.Attributes() {
+ if key == ".s3-location-constraint" {
+ return false
+ }
+ }
+ return true
+}
diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go
new file mode 100644
index 0000000000..e752043d3d
--- /dev/null
+++ b/pkg/core/frostfsid/subject_provider.go
@@ -0,0 +1,18 @@
+package frostfsid
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+)
+
+const (
+ SubjectNotFoundErrorMessage = "subject not found"
+)
+
+// SubjectProvider provides methods to get subjects from the FrostfsID contract.
+type SubjectProvider interface {
+ GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error)
+ GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error)
+}
diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go
index 29cb2dc943..0c64bb7984 100644
--- a/pkg/core/netmap/keys.go
+++ b/pkg/core/netmap/keys.go
@@ -2,6 +2,6 @@ package netmap
// AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes.
type AnnouncedKeys interface {
- // Checks if the key was announced by a local node.
+ // IsLocalKey checks if the key was announced by a local node.
IsLocalKey(key []byte) bool
}
diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go
index 17fccc6209..e58e426343 100644
--- a/pkg/core/netmap/nodes.go
+++ b/pkg/core/netmap/nodes.go
@@ -1,6 +1,10 @@
package netmap
-import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+import (
+ "iter"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
// Node is a named type of netmap.NodeInfo which provides interface needed
// in the current repository. Node is expected to be used everywhere instead
@@ -14,12 +18,17 @@ func (x Node) PublicKey() []byte {
return (netmap.NodeInfo)(x).PublicKey()
}
+// Addresses returns an iterator over all announced network addresses.
+func (x Node) Addresses() iter.Seq[string] {
+ return (netmap.NodeInfo)(x).NetworkEndpoints()
+}
+
// IterateAddresses iterates over all announced network addresses
// and passes them into f. Handler MUST NOT be nil.
+// Deprecated: use [Node.Addresses] instead.
func (x Node) IterateAddresses(f func(string) bool) {
- (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
- for _, addr := range (netmap.NodeInfo)(x).ExternalAddresses() {
- if f(addr) {
+ for s := range (netmap.NodeInfo)(x).NetworkEndpoints() {
+ if f(s) {
return
}
}
diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go
index 7770c61c76..97313da841 100644
--- a/pkg/core/netmap/storage.go
+++ b/pkg/core/netmap/storage.go
@@ -1,6 +1,8 @@
package netmap
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -16,7 +18,7 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMap(diff uint64) (*netmap.NetMap, error)
+ GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error)
// GetNetMapByEpoch reads network map by the epoch number from the storage.
// It returns the pointer to the requested network map and any error encountered.
@@ -25,21 +27,21 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error)
+ GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error)
// Epoch reads the current epoch from the storage.
// It returns the number of the current epoch and any error encountered.
//
// Must return exactly one non-default value.
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
}
// GetLatestNetworkMap requests and returns the latest network map from the storage.
-func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(0)
+func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(ctx, 0)
}
// GetPreviousNetworkMap requests and returns previous from the latest network map from the storage.
-func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(1)
+func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(ctx, 1)
}
diff --git a/pkg/core/object/address.go b/pkg/core/object/address.go
deleted file mode 100644
index cd5559d9ff..0000000000
--- a/pkg/core/object/address.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package object
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// AddressWithType groups object address with its FrostFS
-// object type.
-type AddressWithType struct {
- Address oid.Address
- Type object.Type
-}
diff --git a/pkg/core/object/ec.go b/pkg/core/object/ec.go
new file mode 100644
index 0000000000..549ff7cd37
--- /dev/null
+++ b/pkg/core/object/ec.go
@@ -0,0 +1,13 @@
+package object
+
+import (
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+)
+
+// IsECSupported returns true if erasure coding is supported for the object.
+//
+// EC is supported only for regular, non-linking objects.
+func IsECSupported(obj *objectSDK.Object) bool {
+ return obj.Type() == objectSDK.TypeRegular &&
+ len(obj.Children()) == 0
+}
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index 804f0bd403..cf090eb375 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -1,57 +1,49 @@
package object
import (
+ "context"
"crypto/ecdsa"
+ "crypto/sha256"
"errors"
"fmt"
"strconv"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
// FormatValidator represents an object format validator.
type FormatValidator struct {
*cfg
+
+ senderClassifier SenderClassifier
}
// FormatValidatorOption represents a FormatValidator constructor option.
type FormatValidatorOption func(*cfg)
type cfg struct {
- netState netmap.State
- e LockSource
-}
-
-// DeleteHandler is an interface of delete queue processor.
-type DeleteHandler interface {
- // DeleteObjects places objects to a removal queue.
- //
- // Returns apistatus.LockNonRegularObject if at least one object
- // is locked.
- DeleteObjects(oid.Address, ...oid.Address) error
+ netState netmap.State
+ e LockSource
+ ir InnerRing
+ netmap netmap.Source
+ containers container.Source
+ log *logger.Logger
+ verifyTokenIssuer bool
}
// LockSource is a source of lock relations between the objects.
type LockSource interface {
// IsLocked must clarify object's lock status.
- IsLocked(address oid.Address) (bool, error)
-}
-
-// Locker is an object lock storage interface.
-type Locker interface {
- // Lock list of objects as locked by locker in the specified container.
- //
- // Returns apistatus.LockNonRegularObject if at least object in locked
- // list is irregular (not type of REGULAR).
- Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error
+ IsLocked(ctx context.Context, address oid.Address) (bool, error)
}
var errNilObject = errors.New("object is nil")
@@ -64,7 +56,7 @@ var errNoExpirationEpoch = errors.New("missing expiration epoch attribute")
var errTombstoneExpiration = errors.New("tombstone body and header contain different expiration values")
-var errEmptySGMembers = errors.New("storage group with empty members list")
+var errMissingSignature = errors.New("missing signature")
func defaultCfg() *cfg {
return new(cfg)
@@ -79,7 +71,8 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
}
return &FormatValidator{
- cfg: cfg,
+ cfg: cfg,
+ senderClassifier: NewSenderClassifier(cfg.ir, cfg.netmap, cfg.log),
}
}
@@ -89,7 +82,7 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
// If unprepared is true, only fields set by user are validated.
//
// Returns nil error if the object has valid structure.
-func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
+func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, unprepared bool) error {
if obj == nil {
return errNilObject
}
@@ -112,34 +105,39 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
return fmt.Errorf("invalid attributes: %w", err)
}
+ exp, err := expirationEpochAttribute(obj)
+ if err != nil {
+ if !errors.Is(err, errNoExpirationEpoch) {
+ return fmt.Errorf("object did not pass expiration check: %w", err)
+ }
+ } else if !unprepared && exp < v.netState.CurrentEpoch() {
+ if err := v.checkIfExpired(ctx, obj); err != nil {
+ return fmt.Errorf("object did not pass expiration check: %w", err)
+ }
+ }
+
if !unprepared {
- if err := v.validateSignatureKey(obj); err != nil {
+ if err := v.validateSignatureKey(ctx, obj); err != nil {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
- if err := v.checkExpiration(obj); err != nil {
- return fmt.Errorf("object did not pass expiration check: %w", err)
- }
-
- if err := object.CheckHeaderVerificationFields(obj); err != nil {
+ if err := objectSDK.CheckHeaderVerificationFields(obj); err != nil {
return fmt.Errorf("(%T) could not validate header fields: %w", v, err)
}
}
if obj = obj.Parent(); obj != nil {
// Parent object already exists.
- return v.Validate(obj, false)
+ return v.Validate(ctx, obj, false)
}
return nil
}
-func (v *FormatValidator) validateSignatureKey(obj *object.Object) error {
- // FIXME(@cthulhu-rider): temp solution, see neofs-sdk-go#233
+func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
- // TODO(@cthulhu-rider): #1387 use "const" error
- return errors.New("missing signature")
+ return errMissingSignature
}
var sigV2 refs.Signature
@@ -155,16 +153,64 @@ func (v *FormatValidator) validateSignatureKey(obj *object.Object) error {
}
token := obj.SessionToken()
+ ownerID := obj.OwnerID()
- if token == nil || !token.AssertAuthKey(&key) {
- return v.checkOwnerKey(*obj.OwnerID(), key)
+ if token == nil && obj.ECHeader() != nil {
+ role, err := v.isIROrContainerNode(ctx, obj, binKey)
+ if err != nil {
+ return err
+ }
+ if role == acl.RoleContainer {
+ // an EC part could be restored or created by a container node, so the owner ID may not match the object signature key
+ return nil
+ }
+ return v.checkOwnerKey(ownerID, key)
}
- // FIXME: #1159 perform token verification
+ if token == nil || !token.AssertAuthKey(&key) {
+ return v.checkOwnerKey(ownerID, key)
+ }
+
+ if v.verifyTokenIssuer {
+ role, err := v.isIROrContainerNode(ctx, obj, binKey)
+ if err != nil {
+ return err
+ }
+
+ if role == acl.RoleContainer || role == acl.RoleInnerRing {
+ return nil
+ }
+
+ if !token.Issuer().Equals(ownerID) {
+ return fmt.Errorf("(%T) different token issuer and object owner identifiers %s/%s", v, token.Issuer(), ownerID)
+ }
+ return nil
+ }
return nil
}
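+// isIROrContainerNode classifies the signer key against the object's
+// container: it returns acl.RoleInnerRing or acl.RoleContainer on a match and
+// acl.RoleOthers otherwise.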
+func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
+ cnrID, containerIDSet := obj.ContainerID()
+ if !containerIDSet {
+ return acl.RoleOthers, errNilCID
+ }
+
+ cnrIDBin := make([]byte, sha256.Size)
+ cnrID.Encode(cnrIDBin)
+
+ cnr, err := v.containers.Get(ctx, cnrID)
+ if err != nil {
+ return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
+ }
+
+ res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
+ if err != nil {
+ return acl.RoleOthers, err
+ }
+ return res.Role, nil
+}
+
func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey) error {
var id2 user.ID
user.IDFromKey(&id2, (ecdsa.PublicKey)(key))
@@ -179,133 +225,41 @@ func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey)
// ContentMeta describes FrostFS meta information that brings object's payload if the object
// is one of:
// - object.TypeTombstone;
-// - object.TypeStorageGroup;
// - object.TypeLock.
type ContentMeta struct {
- typ object.Type
+ typ objectSDK.Type
objs []oid.ID
}
// Type returns object's type.
-func (i ContentMeta) Type() object.Type {
+func (i ContentMeta) Type() objectSDK.Type {
return i.typ
}
// Objects returns objects that the original object's payload affects:
// - inhumed objects, if the original object is a Tombstone;
// - locked objects, if the original object is a Lock;
-// - members of a storage group, if the original object is a Storage group;
// - nil, if the original object is a Regular object.
func (i ContentMeta) Objects() []oid.ID {
return i.objs
}
// ValidateContent validates payload content according to the object type.
-func (v *FormatValidator) ValidateContent(o *object.Object) (ContentMeta, error) {
+func (v *FormatValidator) ValidateContent(o *objectSDK.Object) (ContentMeta, error) {
meta := ContentMeta{
typ: o.Type(),
}
switch o.Type() {
- case object.TypeRegular:
- // ignore regular objects, they do not need payload formatting
- case object.TypeTombstone:
- if len(o.Payload()) == 0 {
- return ContentMeta{}, fmt.Errorf("(%T) empty payload in tombstone", v)
- }
-
- tombstone := object.NewTombstone()
-
- if err := tombstone.Unmarshal(o.Payload()); err != nil {
- return ContentMeta{}, fmt.Errorf("(%T) could not unmarshal tombstone content: %w", v, err)
- }
-
- // check if the tombstone has the same expiration in the body and the header
- exp, err := expirationEpochAttribute(o)
- if err != nil {
+ case objectSDK.TypeTombstone:
+ if err := v.fillAndValidateTombstoneMeta(o, &meta); err != nil {
return ContentMeta{}, err
}
-
- if exp != tombstone.ExpirationEpoch() {
- return ContentMeta{}, errTombstoneExpiration
+ case objectSDK.TypeLock:
+ if err := v.fillAndValidateLockMeta(o, &meta); err != nil {
+ return ContentMeta{}, err
}
-
- // mark all objects from the tombstone body as removed in the storage engine
- _, ok := o.ContainerID()
- if !ok {
- return ContentMeta{}, errors.New("missing container ID")
- }
-
- idList := tombstone.Members()
- meta.objs = idList
- case object.TypeStorageGroup:
- if len(o.Payload()) == 0 {
- return ContentMeta{}, fmt.Errorf("(%T) empty payload in SG", v)
- }
-
- var sg storagegroup.StorageGroup
-
- if err := sg.Unmarshal(o.Payload()); err != nil {
- return ContentMeta{}, fmt.Errorf("(%T) could not unmarshal SG content: %w", v, err)
- }
-
- mm := sg.Members()
- meta.objs = mm
-
- lenMM := len(mm)
- if lenMM == 0 {
- return ContentMeta{}, errEmptySGMembers
- }
-
- uniqueFilter := make(map[oid.ID]struct{}, lenMM)
-
- for i := 0; i < lenMM; i++ {
- if _, alreadySeen := uniqueFilter[mm[i]]; alreadySeen {
- return ContentMeta{}, fmt.Errorf("storage group contains non-unique member: %s", mm[i])
- }
-
- uniqueFilter[mm[i]] = struct{}{}
- }
- case object.TypeLock:
- if len(o.Payload()) == 0 {
- return ContentMeta{}, errors.New("empty payload in lock")
- }
-
- _, ok := o.ContainerID()
- if !ok {
- return ContentMeta{}, errors.New("missing container")
- }
-
- _, ok = o.ID()
- if !ok {
- return ContentMeta{}, errors.New("missing ID")
- }
-
- // check that LOCK object has correct expiration epoch
- lockExp, err := expirationEpochAttribute(o)
- if err != nil {
- return ContentMeta{}, fmt.Errorf("lock object expiration epoch: %w", err)
- }
-
- if currEpoch := v.netState.CurrentEpoch(); lockExp < currEpoch {
- return ContentMeta{}, fmt.Errorf("lock object expiration: %d; current: %d", lockExp, currEpoch)
- }
-
- var lock object.Lock
-
- err = lock.Unmarshal(o.Payload())
- if err != nil {
- return ContentMeta{}, fmt.Errorf("decode lock payload: %w", err)
- }
-
- num := lock.NumberOfMembers()
- if num == 0 {
- return ContentMeta{}, errors.New("missing locked members")
- }
-
- meta.objs = make([]oid.ID, num)
- lock.ReadMembers(meta.objs)
default:
// ignore all other object types, they do not need payload formatting
}
@@ -313,43 +267,99 @@ func (v *FormatValidator) ValidateContent(o *object.Object) (ContentMeta, error)
return meta, nil
}
-var errExpired = errors.New("object has expired")
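+// fillAndValidateLockMeta validates a LOCK object's payload and expiration
+// epoch and fills meta with the list of locked members.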
+func (v *FormatValidator) fillAndValidateLockMeta(o *objectSDK.Object, meta *ContentMeta) error {
+ if len(o.Payload()) == 0 {
+ return errors.New("empty payload in lock")
+ }
-func (v *FormatValidator) checkExpiration(obj *object.Object) error {
- exp, err := expirationEpochAttribute(obj)
+ if _, ok := o.ContainerID(); !ok {
+ return errors.New("missing container")
+ }
+
+ if _, ok := o.ID(); !ok {
+ return errors.New("missing ID")
+ }
+ // check that LOCK object has correct expiration epoch
+ lockExp, err := expirationEpochAttribute(o)
if err != nil {
- if errors.Is(err, errNoExpirationEpoch) {
- return nil // objects without expiration attribute are valid
- }
+ return fmt.Errorf("lock object expiration epoch: %w", err)
+ }
+ if currEpoch := v.netState.CurrentEpoch(); lockExp < currEpoch {
+ return fmt.Errorf("lock object expiration: %d; current: %d", lockExp, currEpoch)
+ }
+
+ var lock objectSDK.Lock
+
+ if err = lock.Unmarshal(o.Payload()); err != nil {
+ return fmt.Errorf("decode lock payload: %w", err)
+ }
+
+ num := lock.NumberOfMembers()
+ if num == 0 {
+ return errors.New("missing locked members")
+ }
+
+ meta.objs = make([]oid.ID, num)
+ lock.ReadMembers(meta.objs)
+ return nil
+}
+
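+// fillAndValidateTombstoneMeta validates a tombstone's payload, checks that
+// the body and header expiration epochs match, and fills meta with the
+// tombstone members.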
+func (v *FormatValidator) fillAndValidateTombstoneMeta(o *objectSDK.Object, meta *ContentMeta) error {
+ if len(o.Payload()) == 0 {
+ return fmt.Errorf("(%T) empty payload in tombstone", v)
+ }
+
+ tombstone := objectSDK.NewTombstone()
+
+ if err := tombstone.Unmarshal(o.Payload()); err != nil {
+ return fmt.Errorf("(%T) could not unmarshal tombstone content: %w", v, err)
+ }
+ // check if the tombstone has the same expiration in the body and the header
+ exp, err := expirationEpochAttribute(o)
+ if err != nil {
return err
}
- if exp < v.netState.CurrentEpoch() {
- // an object could be expired but locked;
- // put such an object is a correct operation
+ if exp != tombstone.ExpirationEpoch() {
+ return errTombstoneExpiration
+ }
- cID, _ := obj.ContainerID()
- oID, _ := obj.ID()
+ // mark all objects from the tombstone body as removed in the storage engine
+ if _, ok := o.ContainerID(); !ok {
+ return errors.New("missing container ID")
+ }
- var addr oid.Address
- addr.SetContainer(cID)
- addr.SetObject(oID)
+ meta.objs = tombstone.Members()
+ return nil
+}
- locked, err := v.e.IsLocked(addr)
- if err != nil {
- return fmt.Errorf("locking status check for an expired object: %w", err)
- }
+var errExpired = errors.New("object has expired")
- if !locked {
- return errExpired
- }
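+// checkIfExpired treats an expired object as valid only if it is locked;
+// otherwise errExpired is returned.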
+func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Object) error {
+ // an object could be expired but locked;
+ // putting such an object is a correct operation
+
+ cID, _ := obj.ContainerID()
+ oID, _ := obj.ID()
+
+ var addr oid.Address
+ addr.SetContainer(cID)
+ addr.SetObject(oID)
+
+ locked, err := v.e.IsLocked(ctx, addr)
+ if err != nil {
+ return fmt.Errorf("locking status check for an expired object: %w", err)
+ }
+
+ if !locked {
+ return errExpired
}
return nil
}
-func expirationEpochAttribute(obj *object.Object) (uint64, error) {
+func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
for _, a := range obj.Attributes() {
if a.Key() != objectV2.SysAttributeExpEpoch {
continue
@@ -366,7 +376,7 @@ var (
errEmptyAttrVal = errors.New("empty attribute value")
)
-func (v *FormatValidator) checkAttributes(obj *object.Object) error {
+func (v *FormatValidator) checkAttributes(obj *objectSDK.Object) error {
as := obj.Attributes()
mUnique := make(map[string]struct{}, len(as))
@@ -390,8 +400,8 @@ func (v *FormatValidator) checkAttributes(obj *object.Object) error {
var errIncorrectOwner = errors.New("incorrect object owner")
-func (v *FormatValidator) checkOwner(obj *object.Object) error {
- if idOwner := obj.OwnerID(); idOwner == nil || len(idOwner.WalletBytes()) == 0 {
+func (v *FormatValidator) checkOwner(obj *objectSDK.Object) error {
+ if idOwner := obj.OwnerID(); idOwner.IsEmpty() {
return errIncorrectOwner
}
@@ -411,3 +421,38 @@ func WithLockSource(e LockSource) FormatValidatorOption {
c.e = e
}
}
+
+// WithInnerRing returns an option to set the Inner Ring source.
+func WithInnerRing(ir InnerRing) FormatValidatorOption {
+ return func(c *cfg) {
+ c.ir = ir
+ }
+}
+
+// WithNetmapSource returns an option to set the netmap source.
+func WithNetmapSource(ns netmap.Source) FormatValidatorOption {
+ return func(c *cfg) {
+ c.netmap = ns
+ }
+}
+
+// WithContainersSource returns an option to set the containers source.
+func WithContainersSource(cs container.Source) FormatValidatorOption {
+ return func(c *cfg) {
+ c.containers = cs
+ }
+}
+
+// WithVerifySessionTokenIssuer returns an option that toggles session token issuer verification.
+func WithVerifySessionTokenIssuer(verifySessionTokenIssuer bool) FormatValidatorOption {
+ return func(c *cfg) {
+ c.verifyTokenIssuer = verifySessionTokenIssuer
+ }
+}
+
+// WithLogger returns an option to set the logger.
+func WithLogger(l *logger.Logger) FormatValidatorOption {
+ return func(c *cfg) {
+ c.log = l
+ }
+}
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go
index 563c7827d7..dc336eb345 100644
--- a/pkg/core/object/fmt_test.go
+++ b/pkg/core/object/fmt_test.go
@@ -1,29 +1,39 @@
package object
import (
+ "context"
"crypto/ecdsa"
+ "fmt"
"strconv"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
+ "go.uber.org/zap/zaptest"
)
-func blankValidObject(key *ecdsa.PrivateKey) *object.Object {
+func blankValidObject(key *ecdsa.PrivateKey) *objectSDK.Object {
var idOwner user.ID
user.IDFromKey(&idOwner, key.PublicKey)
- obj := object.New()
+ obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
- obj.SetOwnerID(&idOwner)
+ obj.SetOwnerID(idOwner)
return obj
}
@@ -40,7 +50,7 @@ type testLockSource struct {
m map[oid.Address]bool
}
-func (t testLockSource) IsLocked(address oid.Address) (bool, error) {
+func (t testLockSource) IsLocked(_ context.Context, address oid.Address) (bool, error) {
return t.m[address], nil
}
@@ -56,34 +66,35 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
ownerKey, err := keys.NewPrivateKey()
require.NoError(t, err)
t.Run("nil input", func(t *testing.T) {
- require.Error(t, v.Validate(nil, true))
+ require.Error(t, v.Validate(context.Background(), nil, true))
})
t.Run("nil identifier", func(t *testing.T) {
- obj := object.New()
+ obj := objectSDK.New()
- require.ErrorIs(t, v.Validate(obj, false), errNilID)
+ require.ErrorIs(t, v.Validate(context.Background(), obj, false), errNilID)
})
t.Run("nil container identifier", func(t *testing.T) {
- obj := object.New()
+ obj := objectSDK.New()
obj.SetID(oidtest.ID())
- require.ErrorIs(t, v.Validate(obj, true), errNilCID)
+ require.ErrorIs(t, v.Validate(context.Background(), obj, true), errNilCID)
})
t.Run("unsigned object", func(t *testing.T) {
- obj := object.New()
+ obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetID(oidtest.ID())
- require.Error(t, v.Validate(obj, false))
+ require.Error(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/ session token", func(t *testing.T) {
@@ -94,33 +105,33 @@ func TestFormatValidator_Validate(t *testing.T) {
err := tok.Sign(ownerKey.PrivateKey)
require.NoError(t, err)
- obj := object.New()
+ obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetSessionToken(tok)
- obj.SetOwnerID(&idOwner)
+ obj.SetOwnerID(idOwner)
- require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
+ require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj))
- require.NoError(t, v.Validate(obj, false))
+ require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/o session token", func(t *testing.T) {
obj := blankValidObject(&ownerKey.PrivateKey)
- require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
+ require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj))
- require.NoError(t, v.Validate(obj, false))
+ require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("tombstone content", func(t *testing.T) {
- obj := object.New()
- obj.SetType(object.TypeTombstone)
+ obj := objectSDK.New()
+ obj.SetType(objectSDK.TypeTombstone)
obj.SetContainerID(cidtest.ID())
_, err := v.ValidateContent(obj)
require.Error(t, err) // no tombstone content
- content := object.NewTombstone()
+ content := objectSDK.NewTombstone()
content.SetMembers([]oid.ID{oidtest.ID()})
data, err := content.Marshal()
@@ -141,7 +152,7 @@ func TestFormatValidator_Validate(t *testing.T) {
_, err = v.ValidateContent(obj)
require.Error(t, err) // no expiration epoch in tombstone
- var expirationAttribute object.Attribute
+ var expirationAttribute objectSDK.Attribute
expirationAttribute.SetKey(objectV2.SysAttributeExpEpoch)
expirationAttribute.SetValue(strconv.Itoa(10))
@@ -163,80 +174,29 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, err) // all good
require.EqualValues(t, []oid.ID{id}, contentGot.Objects())
- require.Equal(t, object.TypeTombstone, contentGot.Type())
- })
-
- t.Run("storage group content", func(t *testing.T) {
- obj := object.New()
- obj.SetType(object.TypeStorageGroup)
-
- t.Run("empty payload", func(t *testing.T) {
- _, err := v.ValidateContent(obj)
- require.Error(t, err)
- })
-
- var content storagegroup.StorageGroup
- content.SetExpirationEpoch(1) // some non-default value
-
- t.Run("empty members", func(t *testing.T) {
- data, err := content.Marshal()
- require.NoError(t, err)
-
- obj.SetPayload(data)
-
- _, err = v.ValidateContent(obj)
- require.ErrorIs(t, err, errEmptySGMembers)
- })
-
- t.Run("non-unique members", func(t *testing.T) {
- id := oidtest.ID()
-
- content.SetMembers([]oid.ID{id, id})
-
- data, err := content.Marshal()
- require.NoError(t, err)
-
- obj.SetPayload(data)
-
- _, err = v.ValidateContent(obj)
- require.Error(t, err)
- })
-
- t.Run("correct SG", func(t *testing.T) {
- ids := []oid.ID{oidtest.ID(), oidtest.ID()}
- content.SetMembers(ids)
-
- data, err := content.Marshal()
- require.NoError(t, err)
-
- obj.SetPayload(data)
-
- content, err := v.ValidateContent(obj)
- require.NoError(t, err)
-
- require.EqualValues(t, ids, content.Objects())
- require.Equal(t, object.TypeStorageGroup, content.Type())
- })
+ require.Equal(t, objectSDK.TypeTombstone, contentGot.Type())
})
t.Run("expiration", func(t *testing.T) {
- fn := func(val string) *object.Object {
+ fn := func(val string) *objectSDK.Object {
obj := blankValidObject(&ownerKey.PrivateKey)
- var a object.Attribute
+ var a objectSDK.Attribute
a.SetKey(objectV2.SysAttributeExpEpoch)
a.SetValue(val)
obj.SetAttributes(a)
- require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
+ require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj))
return obj
}
t.Run("invalid attribute value", func(t *testing.T) {
val := "text"
- err := v.Validate(fn(val), false)
+ err := v.Validate(context.Background(), fn(val), false)
+ require.Error(t, err)
+ err = v.Validate(context.Background(), fn(val), true)
require.Error(t, err)
})
@@ -245,7 +205,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj := fn(val)
t.Run("non-locked", func(t *testing.T) {
- err := v.Validate(obj, false)
+ err := v.Validate(context.Background(), obj, false)
require.ErrorIs(t, err, errExpired)
})
@@ -258,14 +218,14 @@ func TestFormatValidator_Validate(t *testing.T) {
addr.SetObject(oID)
ls.m[addr] = true
- err := v.Validate(obj, false)
+ err := v.Validate(context.Background(), obj, false)
require.NoError(t, err)
})
})
t.Run("alive object", func(t *testing.T) {
val := strconv.FormatUint(curEpoch, 10)
- err := v.Validate(fn(val), true)
+ err := v.Validate(context.Background(), fn(val), true)
require.NoError(t, err)
})
})
@@ -274,11 +234,11 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("duplication", func(t *testing.T) {
obj := blankValidObject(&ownerKey.PrivateKey)
- var a1 object.Attribute
+ var a1 objectSDK.Attribute
a1.SetKey("key1")
a1.SetValue("val1")
- var a2 object.Attribute
+ var a2 objectSDK.Attribute
a2.SetKey("key2")
a2.SetValue("val2")
@@ -297,7 +257,7 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("empty value", func(t *testing.T) {
obj := blankValidObject(&ownerKey.PrivateKey)
- var a object.Attribute
+ var a objectSDK.Attribute
a.SetKey("key")
obj.SetAttributes(a)
@@ -307,3 +267,333 @@ func TestFormatValidator_Validate(t *testing.T) {
})
})
}
+
+func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
+ const curEpoch = 13
+
+ ls := testLockSource{
+ m: make(map[oid.Address]bool),
+ }
+
+ signer, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ var owner user.ID
+ ownerPrivKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ user.IDFromKey(&owner, ownerPrivKey.PrivateKey.PublicKey)
+
+ t.Run("different issuer and owner, verify issuer disabled", func(t *testing.T) {
+ t.Parallel()
+ v := NewFormatValidator(
+ WithNetState(testNetState{
+ epoch: curEpoch,
+ }),
+ WithLockSource(ls),
+ WithVerifySessionTokenIssuer(false),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ )
+
+ tok := sessiontest.Object()
+ fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
+ tok.SetID(uuid.New())
+ tok.SetAuthKey(&fsPubKey)
+ tok.SetExp(100500)
+ tok.SetIat(1)
+ tok.SetNbf(1)
+ require.NoError(t, tok.Sign(signer.PrivateKey))
+
+ obj := objectSDK.New()
+ obj.SetContainerID(cidtest.ID())
+ obj.SetSessionToken(tok)
+ obj.SetOwnerID(owner)
+ require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
+
+ require.NoError(t, v.Validate(context.Background(), obj, false))
+ })
+
+ t.Run("different issuer and owner, issuer is IR node, verify issuer enabled", func(t *testing.T) {
+ t.Parallel()
+
+ cnrID := cidtest.ID()
+ cont := containerSDK.Container{}
+ cont.Init()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ cont.SetPlacementPolicy(pp)
+
+ v := NewFormatValidator(
+ WithNetState(testNetState{
+ epoch: curEpoch,
+ }),
+ WithLockSource(ls),
+ WithVerifySessionTokenIssuer(true),
+ WithInnerRing(&testIRSource{
+ irNodes: [][]byte{signer.PublicKey().Bytes()},
+ }),
+ WithContainersSource(
+ &testContainerSource{
+ containers: map[cid.ID]*container.Container{
+ cnrID: {
+ Value: cont,
+ },
+ },
+ },
+ ),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ )
+
+ tok := sessiontest.Object()
+ fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
+ tok.SetID(uuid.New())
+ tok.SetAuthKey(&fsPubKey)
+ tok.SetExp(100500)
+ tok.SetIat(1)
+ tok.SetNbf(1)
+ require.NoError(t, tok.Sign(signer.PrivateKey))
+
+ obj := objectSDK.New()
+ obj.SetContainerID(cnrID)
+ obj.SetSessionToken(tok)
+ obj.SetOwnerID(owner)
+ require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
+
+ require.NoError(t, v.Validate(context.Background(), obj, false))
+ })
+
+ t.Run("different issuer and owner, issuer is container node in current epoch, verify issuer enabled", func(t *testing.T) {
+ t.Parallel()
+
+ tok := sessiontest.Object()
+ fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
+ tok.SetID(uuid.New())
+ tok.SetAuthKey(&fsPubKey)
+ tok.SetExp(100500)
+ tok.SetIat(1)
+ tok.SetNbf(1)
+ require.NoError(t, tok.Sign(signer.PrivateKey))
+
+ cnrID := cidtest.ID()
+ cont := containerSDK.Container{}
+ cont.Init()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ cont.SetPlacementPolicy(pp)
+
+ var node netmap.NodeInfo
+ node.SetPublicKey(signer.PublicKey().Bytes())
+ currentEpochNM := &netmap.NetMap{}
+ currentEpochNM.SetEpoch(curEpoch)
+ currentEpochNM.SetNodes([]netmap.NodeInfo{node})
+
+ obj := objectSDK.New()
+ obj.SetContainerID(cnrID)
+ obj.SetSessionToken(tok)
+ obj.SetOwnerID(owner)
+ require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
+
+ v := NewFormatValidator(
+ WithNetState(testNetState{
+ epoch: curEpoch,
+ }),
+ WithLockSource(ls),
+ WithVerifySessionTokenIssuer(true),
+ WithInnerRing(&testIRSource{
+ irNodes: [][]byte{},
+ }),
+ WithContainersSource(
+ &testContainerSource{
+ containers: map[cid.ID]*container.Container{
+ cnrID: {
+ Value: cont,
+ },
+ },
+ },
+ ),
+ WithNetmapSource(
+ &utilTesting.TestNetmapSource{
+ Netmaps: map[uint64]*netmap.NetMap{
+ curEpoch: currentEpochNM,
+ },
+ CurrentEpoch: curEpoch,
+ },
+ ),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ )
+
+ require.NoError(t, v.Validate(context.Background(), obj, false))
+ })
+
+ t.Run("different issuer and owner, issuer is container node in previous epoch, verify issuer enabled", func(t *testing.T) {
+ t.Parallel()
+
+ tok := sessiontest.Object()
+ fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
+ tok.SetID(uuid.New())
+ tok.SetAuthKey(&fsPubKey)
+ tok.SetExp(100500)
+ tok.SetIat(1)
+ tok.SetNbf(1)
+ require.NoError(t, tok.Sign(signer.PrivateKey))
+
+ cnrID := cidtest.ID()
+ cont := containerSDK.Container{}
+ cont.Init()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ cont.SetPlacementPolicy(pp)
+
+ var issuerNode netmap.NodeInfo
+ issuerNode.SetPublicKey(signer.PublicKey().Bytes())
+
+ var nonIssuerNode netmap.NodeInfo
+ nonIssuerKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ nonIssuerNode.SetPublicKey(nonIssuerKey.PublicKey().Bytes())
+
+ currentEpochNM := &netmap.NetMap{}
+ currentEpochNM.SetEpoch(curEpoch)
+ currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode})
+
+ previousEpochNM := &netmap.NetMap{}
+ previousEpochNM.SetEpoch(curEpoch - 1)
+ previousEpochNM.SetNodes([]netmap.NodeInfo{issuerNode})
+
+ obj := objectSDK.New()
+ obj.SetContainerID(cnrID)
+ obj.SetSessionToken(tok)
+ obj.SetOwnerID(owner)
+ require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
+
+ v := NewFormatValidator(
+ WithNetState(testNetState{
+ epoch: curEpoch,
+ }),
+ WithLockSource(ls),
+ WithVerifySessionTokenIssuer(true),
+ WithInnerRing(&testIRSource{
+ irNodes: [][]byte{},
+ }),
+ WithContainersSource(
+ &testContainerSource{
+ containers: map[cid.ID]*container.Container{
+ cnrID: {
+ Value: cont,
+ },
+ },
+ },
+ ),
+ WithNetmapSource(
+ &utilTesting.TestNetmapSource{
+ Netmaps: map[uint64]*netmap.NetMap{
+ curEpoch: currentEpochNM,
+ curEpoch - 1: previousEpochNM,
+ },
+ CurrentEpoch: curEpoch,
+ },
+ ),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ )
+
+ require.NoError(t, v.Validate(context.Background(), obj, false))
+ })
+
+ t.Run("different issuer and owner, issuer is unknown, verify issuer enabled", func(t *testing.T) {
+ t.Parallel()
+
+ tok := sessiontest.Object()
+ fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
+ tok.SetID(uuid.New())
+ tok.SetAuthKey(&fsPubKey)
+ tok.SetExp(100500)
+ tok.SetIat(1)
+ tok.SetNbf(1)
+ require.NoError(t, tok.Sign(signer.PrivateKey))
+
+ cnrID := cidtest.ID()
+ cont := containerSDK.Container{}
+ cont.Init()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ cont.SetPlacementPolicy(pp)
+
+ var nonIssuerNode1 netmap.NodeInfo
+ nonIssuerKey1, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ nonIssuerNode1.SetPublicKey(nonIssuerKey1.PublicKey().Bytes())
+
+ var nonIssuerNode2 netmap.NodeInfo
+ nonIssuerKey2, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ nonIssuerNode2.SetPublicKey(nonIssuerKey2.PublicKey().Bytes())
+
+ currentEpochNM := &netmap.NetMap{}
+ currentEpochNM.SetEpoch(curEpoch)
+ currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode1})
+
+ previousEpochNM := &netmap.NetMap{}
+ previousEpochNM.SetEpoch(curEpoch - 1)
+ previousEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode2})
+
+ obj := objectSDK.New()
+ obj.SetContainerID(cnrID)
+ obj.SetSessionToken(tok)
+ obj.SetOwnerID(owner)
+ require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
+
+ v := NewFormatValidator(
+ WithNetState(testNetState{
+ epoch: curEpoch,
+ }),
+ WithLockSource(ls),
+ WithVerifySessionTokenIssuer(true),
+ WithInnerRing(&testIRSource{
+ irNodes: [][]byte{},
+ }),
+ WithContainersSource(
+ &testContainerSource{
+ containers: map[cid.ID]*container.Container{
+ cnrID: {
+ Value: cont,
+ },
+ },
+ },
+ ),
+ WithNetmapSource(
+ &utilTesting.TestNetmapSource{
+ Netmaps: map[uint64]*netmap.NetMap{
+ curEpoch: currentEpochNM,
+ curEpoch - 1: previousEpochNM,
+ },
+ CurrentEpoch: curEpoch,
+ },
+ ),
+ WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ )
+
+ require.Error(t, v.Validate(context.Background(), obj, false))
+ })
+}
+
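+// testIRSource is a stub InnerRing source that returns a fixed list of keys.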
+type testIRSource struct {
+ irNodes [][]byte
+}
+
+func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) {
+ return s.irNodes, nil
+}
+
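+// testContainerSource is a stub container source backed by an in-memory map.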
+type testContainerSource struct {
+ containers map[cid.ID]*container.Container
+}
+
+func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
+ if cnr, found := s.containers[cnrID]; found {
+ return cnr, nil
+ }
+ return nil, fmt.Errorf("container not found")
+}
+
+func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
+ return nil, nil
+}
diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go
new file mode 100644
index 0000000000..aab12ebf93
--- /dev/null
+++ b/pkg/core/object/info.go
@@ -0,0 +1,34 @@
+package object
+
+import (
+ "fmt"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
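+// ECInfo describes the erasure-coding relation of an object part: the parent
+// object ID, the part index and the total number of parts.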
+type ECInfo struct {
+ ParentID oid.ID
+ Index uint32
+ Total uint32
+}
+
+func (v *ECInfo) String() string {
+ if v == nil {
+ return ""
+ }
+ return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total)
+}
+
+// Info groups an object's address with its FrostFS object metadata.
+type Info struct {
+ Address oid.Address
+ Type objectSDK.Type
+ IsLinkingObject bool
+ ECInfo *ECInfo
+}
+
+func (v Info) String() string {
+ return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo)
+}
diff --git a/pkg/core/object/object.go b/pkg/core/object/object.go
index 8fb656acdd..9c450966cb 100644
--- a/pkg/core/object/object.go
+++ b/pkg/core/object/object.go
@@ -1,12 +1,12 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
// AddressOf returns the address of the object.
-func AddressOf(obj *object.Object) oid.Address {
+func AddressOf(obj *objectSDK.Object) oid.Address {
var addr oid.Address
id, ok := obj.ID()
diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go
new file mode 100644
index 0000000000..3733ed5078
--- /dev/null
+++ b/pkg/core/object/sender_classifier.go
@@ -0,0 +1,164 @@
+package object
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "go.uber.org/zap"
+)
+
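+// InnerRing is a source of the current Inner Ring node keys.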
+type InnerRing interface {
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
+}
+
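+// SenderClassifier resolves the ACL role of a request sender using the
+// Inner Ring key list and the network map.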
+type SenderClassifier struct {
+ log *logger.Logger
+ innerRing InnerRing
+ netmap core.Source
+}
+
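+// NewSenderClassifier constructs a SenderClassifier over the given Inner Ring and netmap sources.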
+func NewSenderClassifier(innerRing InnerRing, netmap core.Source, log *logger.Logger) SenderClassifier {
+ return SenderClassifier{
+ log: log,
+ innerRing: innerRing,
+ netmap: netmap,
+ }
+}
+
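+// ClassifyResult groups the resolved ACL role with the sender's public key.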
+type ClassifyResult struct {
+ Role acl.Role
+ Key []byte
+}
+
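+// Classify resolves the sender's role: acl.RoleOwner if the sender owns the
+// container, otherwise the Inner Ring and container node checks apply.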
+func (c SenderClassifier) Classify(
+ ctx context.Context,
+ ownerID *user.ID,
+ ownerKey *keys.PublicKey,
+ idCnr cid.ID,
+ cnr container.Container,
+) (res *ClassifyResult, err error) {
+ ownerKeyInBytes := ownerKey.Bytes()
+
+ // TODO: #767 get owner from frostfs.id if present
+
+ // if the request owner is the same as the container owner, return RoleOwner
+ if ownerID.Equals(cnr.Owner()) {
+ return &ClassifyResult{
+ Role: acl.RoleOwner,
+ Key: ownerKeyInBytes,
+ }, nil
+ }
+
+ return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
+}
+
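+// IsInnerRingOrContainerNode reports whether the key belongs to an Inner Ring
+// node or to a node of the given container; acl.RoleOthers is returned when
+// neither check matches.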
+func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
+ isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes)
+ if err != nil {
+ // do not return the error, try best-effort matching
+ c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
+ zap.Error(err))
+ } else if isInnerRingNode {
+ return &ClassifyResult{
+ Role: acl.RoleInnerRing,
+ Key: ownerKeyInBytes,
+ }, nil
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ idCnr.Encode(binCnr)
+
+ isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr)
+ if err != nil {
+ // an error might happen if the request has a RoleOthers key and placement
+ // is not possible for the previous epoch, so do not return the error and
+ // try best-effort matching
+ c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
+ zap.Error(err))
+ } else if isContainerNode {
+ return &ClassifyResult{
+ Role: acl.RoleContainer,
+ Key: ownerKeyInBytes,
+ }, nil
+ }
+
+ // if none of the above matched, return RoleOthers
+ return &ClassifyResult{
+ Role: acl.RoleOthers,
+ Key: ownerKeyInBytes,
+ }, nil
+}
+
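+// isInnerRingKey reports whether owner is one of the current Inner Ring keys.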
+func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) {
+ innerRingKeys, err := c.innerRing.InnerRingKeys(ctx)
+ if err != nil {
+ return false, err
+ }
+
+	// if the request owner key is in the Inner Ring list, the sender is an Inner Ring node
+ for i := range innerRingKeys {
+ if bytes.Equal(innerRingKeys[i], owner) {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+func (c SenderClassifier) isContainerKey(
+ ctx context.Context,
+ owner, idCnr []byte,
+ cnr container.Container,
+) (bool, error) {
+ nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap
+ if err != nil {
+ return false, err
+ }
+
+ in, err := LookupKeyInContainer(nm, owner, idCnr, cnr)
+ if err != nil {
+ return false, err
+ } else if in {
+ return true, nil
+ }
+
+	// then check the previous netmap: in between epoch changes a node may still
+	// migrate data from a container of the previous epoch
+ nm, err = core.GetPreviousNetworkMap(ctx, c.netmap)
+ if err != nil {
+ return false, err
+ }
+
+ return LookupKeyInContainer(nm, owner, idCnr, cnr)
+}
+
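+// LookupKeyInContainer reports whether the public key belongs to one of the nodes
+// selected by the container placement policy in the given network map.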
+func LookupKeyInContainer(
+ nm *netmap.NetMap,
+ pkey, idCnr []byte,
+ cnr container.Container,
+) (bool, error) {
+ cnrVectors, err := nm.ContainerNodes(cnr.PlacementPolicy(), idCnr)
+ if err != nil {
+ return false, err
+ }
+
+ for i := range cnrVectors {
+ for j := range cnrVectors[i] {
+ if bytes.Equal(cnrVectors[i][j].PublicKey(), pkey) {
+ return true, nil
+ }
+ }
+ }
+
+ return false, nil
+}
diff --git a/pkg/core/policy/ec.go b/pkg/core/policy/ec.go
new file mode 100644
index 0000000000..846af775a2
--- /dev/null
+++ b/pkg/core/policy/ec.go
@@ -0,0 +1,20 @@
+package policy
+
+import (
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+// IsECPlacement returns true if the policy is an erasure coding placement policy.
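+// A policy is treated as EC when its single replica descriptor carries non-zero EC
+// counts, e.g. as produced by an expression like `EC 3.1` (3 data chunks, 1 parity
+// chunk) in the placement policy language.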
+func IsECPlacement(policy netmapSDK.PlacementPolicy) bool {
+ return policy.NumberOfReplicas() == 1 && policy.ReplicaDescriptor(0).GetECDataCount() > 0
+}
+
+// ECDataCount returns the EC data count; the policy is assumed to be an EC placement policy.
+func ECDataCount(policy netmapSDK.PlacementPolicy) int {
+ return int(policy.ReplicaDescriptor(0).GetECDataCount())
+}
+
+// ECParityCount returns the EC parity count; the policy is assumed to be an EC placement policy.
+func ECParityCount(policy netmapSDK.PlacementPolicy) int {
+ return int(policy.ReplicaDescriptor(0).GetECParityCount())
+}
diff --git a/pkg/core/storagegroup/storagegroup.go b/pkg/core/storagegroup/storagegroup.go
deleted file mode 100644
index 6474377bb8..0000000000
--- a/pkg/core/storagegroup/storagegroup.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package storagegroup
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
-)
-
-// SearchSGPrm groups the parameters which are formed by Processor to search the storage group objects.
-type SearchSGPrm struct {
- Context context.Context
-
- Container cid.ID
-
- NodeInfo client.NodeInfo
-}
-
-// SearchSGDst groups the target values which Processor expects from SG searching to process.
-type SearchSGDst struct {
- Objects []oid.ID
-}
-
-// GetSGPrm groups parameter of GetSG operation.
-type GetSGPrm struct {
- Context context.Context
-
- OID oid.ID
- CID cid.ID
-
- NetMap netmap.NetMap
- Container [][]netmap.NodeInfo
-}
-
-// SGSource is a storage group information source interface.
-type SGSource interface {
- // ListSG must list storage group objects in the container. Formed list must be written to destination.
- //
- // Must return any error encountered which did not allow to form the list.
- ListSG(*SearchSGDst, SearchSGPrm) error
-
- // GetSG must return storage group object for the provided CID, OID,
- // container and netmap state.
- GetSG(GetSGPrm) (*storagegroup.StorageGroup, error)
-}
-
-// StorageGroup combines storage group object ID and its structure.
-type StorageGroup struct {
- id oid.ID
- sg storagegroup.StorageGroup
-}
-
-// ID returns object ID of the storage group.
-func (s StorageGroup) ID() oid.ID {
- return s.id
-}
-
-// SetID sets an object ID of the storage group.
-func (s *StorageGroup) SetID(id oid.ID) {
- s.id = id
-}
-
-// StorageGroup returns the storage group descriptor.
-func (s StorageGroup) StorageGroup() storagegroup.StorageGroup {
- return s.sg
-}
-
-// SetStorageGroup sets a storage group descriptor.
-func (s *StorageGroup) SetStorageGroup(sg storagegroup.StorageGroup) {
- s.sg = sg
-}
diff --git a/pkg/innerring/alphabet.go b/pkg/innerring/alphabet.go
index be16f82328..ddb3444038 100644
--- a/pkg/innerring/alphabet.go
+++ b/pkg/innerring/alphabet.go
@@ -52,103 +52,65 @@ const (
lastLetterNum
)
-// String returns l in config-compatible format.
-func (l GlagoliticLetter) String() string {
- switch l {
- default:
- return "unknown"
- case az:
- return "az"
- case buky:
- return "buky"
- case vedi:
- return "vedi"
- case glagoli:
- return "glagoli"
- case dobro:
- return "dobro"
- case yest:
- return "yest"
- case zhivete:
- return "zhivete"
- case dzelo:
- return "dzelo"
- case zemlja:
- return "zemlja"
- case izhe:
- return "izhe"
- case izhei:
- return "izhei"
- case gerv:
- return "gerv"
- case kako:
- return "kako"
- case ljudi:
- return "ljudi"
- case mislete:
- return "mislete"
- case nash:
- return "nash"
- case on:
- return "on"
- case pokoj:
- return "pokoj"
- case rtsi:
- return "rtsi"
- case slovo:
- return "slovo"
- case tverdo:
- return "tverdo"
- case uk:
- return "uk"
- case fert:
- return "fert"
- case kher:
- return "kher"
- case oht:
- return "oht"
- case shta:
- return "shta"
- case tsi:
- return "tsi"
- case cherv:
- return "cherv"
- case sha:
- return "sha"
- case yer:
- return "yer"
- case yeri:
- return "yeri"
- case yerj:
- return "yerj"
- case yat:
- return "yat"
- case jo:
- return "jo"
- case yu:
- return "yu"
- case smallYus:
- return "small.yus"
- case smallIotatedYus:
- return "small.iotated.yus"
- case bigYus:
- return "big.yus"
- case bigIotatedYus:
- return "big.iotated.yus"
- case fita:
- return "fita"
- case izhitsa:
- return "izhitsa"
- }
+var glagoliticLetterToString = map[GlagoliticLetter]string{
+ az: "az",
+ buky: "buky",
+ vedi: "vedi",
+ glagoli: "glagoli",
+ dobro: "dobro",
+ yest: "yest",
+ zhivete: "zhivete",
+ dzelo: "dzelo",
+ zemlja: "zemlja",
+ izhe: "izhe",
+ izhei: "izhei",
+ gerv: "gerv",
+ kako: "kako",
+ ljudi: "ljudi",
+ mislete: "mislete",
+ nash: "nash",
+ on: "on",
+ pokoj: "pokoj",
+ rtsi: "rtsi",
+ slovo: "slovo",
+ tverdo: "tverdo",
+ uk: "uk",
+ fert: "fert",
+ kher: "kher",
+ oht: "oht",
+ shta: "shta",
+ tsi: "tsi",
+ cherv: "cherv",
+ sha: "sha",
+ yer: "yer",
+ yeri: "yeri",
+ yerj: "yerj",
+ yat: "yat",
+ jo: "jo",
+ yu: "yu",
+ smallYus: "small.yus",
+ smallIotatedYus: "small.iotated.yus",
+ bigYus: "big.yus",
+ bigIotatedYus: "big.iotated.yus",
+ fita: "fita",
+ izhitsa: "izhitsa",
}
-type alphabetContracts map[GlagoliticLetter]util.Uint160
+// String returns l in config-compatible format.
+func (l GlagoliticLetter) String() string {
+	if str, found := glagoliticLetterToString[l]; found {
+ return str
+ }
+ return "unknown"
+}
-func newAlphabetContracts() alphabetContracts {
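+// AlphabetContracts maps Glagolitic letters to Alphabet contract script hashes.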
+type AlphabetContracts map[GlagoliticLetter]util.Uint160
+
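+// NewAlphabetContracts returns an empty AlphabetContracts with capacity for all alphabet letters.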
+func NewAlphabetContracts() AlphabetContracts {
return make(map[GlagoliticLetter]util.Uint160, lastLetterNum)
}
-func (a alphabetContracts) GetByIndex(ind int) (util.Uint160, bool) {
+func (a AlphabetContracts) GetByIndex(ind int) (util.Uint160, bool) {
if ind < 0 || ind >= int(lastLetterNum) {
return util.Uint160{}, false
}
@@ -158,16 +120,16 @@ func (a alphabetContracts) GetByIndex(ind int) (util.Uint160, bool) {
return contract, ok
}
-func (a alphabetContracts) indexOutOfRange(ind int) bool {
+func (a AlphabetContracts) indexOutOfRange(ind int) bool {
-	return ind < 0 && ind >= len(a)
+	return ind < 0 || ind >= len(a)
}
-func (a alphabetContracts) iterate(f func(GlagoliticLetter, util.Uint160)) {
+func (a AlphabetContracts) iterate(f func(GlagoliticLetter, util.Uint160)) {
for letter, contract := range a {
f(letter, contract)
}
}
-func (a *alphabetContracts) set(l GlagoliticLetter, h util.Uint160) {
+func (a *AlphabetContracts) set(l GlagoliticLetter, h util.Uint160) {
(*a)[l] = h
}
diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go
index 0e10125c37..dfada764aa 100644
--- a/pkg/innerring/bindings.go
+++ b/pkg/innerring/bindings.go
@@ -8,20 +8,13 @@ type (
// ContractProcessor interface defines functions for binding event producers
// such as event.Listener and Timers with contract processor.
ContractProcessor interface {
- ListenerNotificationParsers() []event.NotificationParserInfo
ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo
- TimersHandlers() []event.NotificationHandlerInfo
}
)
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
- // register notification parsers
- for _, parser := range p.ListenerNotificationParsers() {
- l.SetNotificationParser(parser)
- }
-
// register notification handlers
for _, handler := range p.ListenerNotificationHandlers() {
l.RegisterNotificationHandler(handler)
diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go
index 747f36fdff..3f9d8df5ff 100644
--- a/pkg/innerring/blocktimer.go
+++ b/pkg/innerring/blocktimer.go
@@ -4,14 +4,9 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
- "go.uber.org/zap"
)
type (
@@ -20,27 +15,12 @@ type (
EpochDuration() uint64
}
- subEpochEventHandler struct {
- handler event.Handler // handle to execute
- durationMul uint32 // X: X/Y of epoch in blocks
- durationDiv uint32 // Y: X/Y of epoch in blocks
- }
-
newEpochHandler func()
epochTimerArgs struct {
- l *logger.Logger
-
newEpochHandlers []newEpochHandler
- cnrWrapper *container.Client // to invoke stop container estimation
- epoch epochState // to specify which epoch to stop, and epoch duration
-
- stopEstimationDMul uint32 // X: X/Y of epoch in blocks
- stopEstimationDDiv uint32 // Y: X/Y of epoch in blocks
-
- collectBasicIncome subEpochEventHandler
- distributeBasicIncome subEpochEventHandler
+		epoch epochState // to get the epoch duration
}
emitTimerArgs struct {
@@ -49,7 +29,7 @@ type (
emitDuration uint32 // in blocks
}
- depositor func() (util.Uint256, error)
+ depositor func(context.Context) (util.Uint256, error)
awaiter func(context.Context, util.Uint256) error
)
@@ -74,7 +54,7 @@ func (s *Server) tickTimers(h uint32) {
}
func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
- epochTimer := timer.NewBlockTimer(
+ return timer.NewBlockTimer(
func() (uint32, error) {
return uint32(args.epoch.EpochDuration()), nil
},
@@ -84,65 +64,13 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
}
},
)
-
- // sub-timer for epoch timer to tick stop container estimation events at
- // some block in epoch
- epochTimer.OnDelta(
- args.stopEstimationDMul,
- args.stopEstimationDDiv,
- func() {
- epochN := args.epoch.EpochCounter()
- if epochN == 0 { // estimates are invalid in genesis epoch
- return
- }
-
- prm := container.StopEstimationPrm{}
- prm.SetEpoch(epochN - 1)
-
- err := args.cnrWrapper.StopEstimation(prm)
- if err != nil {
- args.l.Warn("can't stop epoch estimation",
- zap.Uint64("epoch", epochN),
- zap.String("error", err.Error()))
- }
- })
-
- epochTimer.OnDelta(
- args.collectBasicIncome.durationMul,
- args.collectBasicIncome.durationDiv,
- func() {
- epochN := args.epoch.EpochCounter()
- if epochN == 0 { // estimates are invalid in genesis epoch
- return
- }
-
- args.collectBasicIncome.handler(
- settlement.NewBasicIncomeCollectEvent(epochN - 1),
- )
- })
-
- epochTimer.OnDelta(
- args.distributeBasicIncome.durationMul,
- args.distributeBasicIncome.durationDiv,
- func() {
- epochN := args.epoch.EpochCounter()
- if epochN == 0 { // estimates are invalid in genesis epoch
- return
- }
-
- args.distributeBasicIncome.handler(
- settlement.NewBasicIncomeDistributeEvent(epochN - 1),
- )
- })
-
- return epochTimer
}
-func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer {
+func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer {
return timer.NewBlockTimer(
timer.StaticBlockMeter(args.emitDuration),
func() {
- args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{})
+ args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{})
},
)
}
diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go
new file mode 100644
index 0000000000..4cbe7e3948
--- /dev/null
+++ b/pkg/innerring/blocktimer_test.go
@@ -0,0 +1,85 @@
+package innerring
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestEpochTimer(t *testing.T) {
+ t.Parallel()
+ neh := &testNewEpochHandler{}
+ epochState := &testEpochState{
+ counter: 99,
+ duration: 10,
+ }
+
+ args := &epochTimerArgs{
+ newEpochHandlers: []newEpochHandler{neh.Handle},
+ epoch: epochState,
+ }
+ et := newEpochTimer(args)
+ err := et.Reset()
+ require.NoError(t, err, "failed to reset timer")
+
+ et.Tick(100)
+ require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+
+ et.Tick(101)
+ require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+
+ et.Tick(102)
+ require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+
+ et.Tick(103)
+ require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+
+ var h uint32
+ for h = 104; h < 109; h++ {
+ et.Tick(h)
+ require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ }
+
+ et.Tick(109)
+ require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+
+ et.Tick(110)
+ require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+
+ et.Tick(111)
+ require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+
+ et.Tick(112)
+ require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+
+ et.Tick(113)
+ require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+
+ for h = 114; h < 119; h++ {
+ et.Tick(h)
+ require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ }
+ et.Tick(120)
+ require.Equal(t, 2, neh.called, "invalid new epoch handler calls")
+}
+
+type testNewEpochHandler struct {
+ called int
+}
+
+func (h *testNewEpochHandler) Handle() {
+ h.called++
+}
+
+type testEpochState struct {
+ counter uint64
+ duration uint64
+}
+
+func (s *testEpochState) EpochCounter() uint64 {
+ return s.counter
+}
+
+func (s *testEpochState) EpochDuration() uint64 {
+ return s.duration
+}
diff --git a/pkg/innerring/config/fee.go b/pkg/innerring/config/fee.go
index d776856431..a26a7bcc63 100644
--- a/pkg/innerring/config/fee.go
+++ b/pkg/innerring/config/fee.go
@@ -8,19 +8,15 @@ import (
// FeeConfig is an instance that returns extra fee values for contract
// invocations without notary support.
type FeeConfig struct {
- registerNamedCnr,
mainchain,
sidechain fixedn.Fixed8
}
// NewFeeConfig constructs FeeConfig from viper.Viper instance. Latter must not be nil.
-//
-// Fee for named container registration is taken from "fee.named_container_register" value.
func NewFeeConfig(v *viper.Viper) *FeeConfig {
return &FeeConfig{
- registerNamedCnr: fixedn.Fixed8(v.GetInt64("fee.named_container_register")),
- mainchain: fixedn.Fixed8(v.GetInt64("fee.main_chain")),
- sidechain: fixedn.Fixed8(v.GetInt64("fee.side_chain")),
+ mainchain: fixedn.Fixed8(v.GetInt64("fee.main_chain")),
+ sidechain: fixedn.Fixed8(v.GetInt64("fee.side_chain")),
}
}
@@ -31,8 +27,3 @@ func (f FeeConfig) MainChainFee() fixedn.Fixed8 {
func (f FeeConfig) SideChainFee() fixedn.Fixed8 {
return f.sidechain
}
-
-// NamedContainerRegistrationFee returns additional GAS fee for named container registration in FrostFS network.
-func (f FeeConfig) NamedContainerRegistrationFee() fixedn.Fixed8 {
- return f.registerNamedCnr
-}
diff --git a/pkg/innerring/config/fee_test.go b/pkg/innerring/config/fee_test.go
new file mode 100644
index 0000000000..ced21b2385
--- /dev/null
+++ b/pkg/innerring/config/fee_test.go
@@ -0,0 +1,63 @@
+package config
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/spf13/viper"
+ "github.com/stretchr/testify/require"
+)
+
+func TestConfig(t *testing.T) {
+ t.Parallel()
+ t.Run("all set", func(t *testing.T) {
+ t.Parallel()
+ file := strings.NewReader(
+ `
+fee:
+ main_chain: 50000000
+ side_chain: 200000000
+`,
+ )
+ v := viper.New()
+ v.SetConfigType("yaml")
+ err := v.ReadConfig(file)
+ require.NoError(t, err, "read config file failed")
+
+ config := NewFeeConfig(v)
+ require.Equal(t, fixedn.Fixed8(50000000), config.MainChainFee(), "main chain fee invalid")
+ require.Equal(t, fixedn.Fixed8(200000000), config.SideChainFee(), "side chain fee invalid")
+ })
+
+ t.Run("nothing set", func(t *testing.T) {
+ t.Parallel()
+ file := strings.NewReader("")
+ v := viper.New()
+ v.SetConfigType("yaml")
+ err := v.ReadConfig(file)
+ require.NoError(t, err, "read config file failed")
+
+ config := NewFeeConfig(v)
+ require.Equal(t, fixedn.Fixed8(0), config.MainChainFee(), "main chain fee invalid")
+ require.Equal(t, fixedn.Fixed8(0), config.SideChainFee(), "side chain fee invalid")
+ })
+
+ t.Run("partially set", func(t *testing.T) {
+ t.Parallel()
+ file := strings.NewReader(
+ `
+fee:
+ main_chain: 10
+`,
+ )
+ v := viper.New()
+ v.SetConfigType("yaml")
+ err := v.ReadConfig(file)
+ require.NoError(t, err, "read config file failed")
+
+ config := NewFeeConfig(v)
+ require.Equal(t, fixedn.Fixed8(10), config.MainChainFee(), "main chain fee invalid")
+ require.Equal(t, fixedn.Fixed8(0), config.SideChainFee(), "side chain fee invalid")
+ })
+}
diff --git a/pkg/innerring/contracts.go b/pkg/innerring/contracts.go
index c280eb4bf6..4a80296f49 100644
--- a/pkg/innerring/contracts.go
+++ b/pkg/innerring/contracts.go
@@ -15,17 +15,14 @@ type contracts struct {
netmap util.Uint160 // in morph
balance util.Uint160 // in morph
container util.Uint160 // in morph
- audit util.Uint160 // in morph
proxy util.Uint160 // in morph
processing util.Uint160 // in mainnet
- reputation util.Uint160 // in morph
- subnet util.Uint160 // in morph
frostfsID util.Uint160 // in morph
- alphabet alphabetContracts // in morph
+ alphabet AlphabetContracts // in morph
}
-func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, withoutMainNotary, withoutSideNotary bool) (*contracts, error) {
+func parseContracts(cfg *viper.Viper, morph nnsResolver, withoutMainNet, withoutMainNotary bool) (*contracts, error) {
var (
result = new(contracts)
err error
@@ -45,11 +42,9 @@ func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, with
}
}
- if !withoutSideNotary {
- result.proxy, err = parseContract(cfg, morph, "contracts.proxy", client.NNSProxyContractName)
- if err != nil {
- return nil, fmt.Errorf("can't get proxy script hash: %w", err)
- }
+ result.proxy, err = parseContract(cfg, morph, "contracts.proxy", client.NNSProxyContractName)
+ if err != nil {
+ return nil, fmt.Errorf("can't get proxy script hash: %w", err)
}
targets := [...]struct {
@@ -60,9 +55,6 @@ func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, with
{"contracts.netmap", client.NNSNetmapContractName, &result.netmap},
{"contracts.balance", client.NNSBalanceContractName, &result.balance},
{"contracts.container", client.NNSContainerContractName, &result.container},
- {"contracts.audit", client.NNSAuditContractName, &result.audit},
- {"contracts.reputation", client.NNSReputationContractName, &result.reputation},
- {"contracts.subnet", client.NNSSubnetworkContractName, &result.subnet},
{"contracts.frostfsid", client.NNSFrostFSIDContractName, &result.frostfsID},
}
@@ -82,9 +74,9 @@ func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, with
return result, nil
}
-func parseAlphabetContracts(cfg *viper.Viper, morph *client.Client) (alphabetContracts, error) {
+func parseAlphabetContracts(cfg *viper.Viper, morph nnsResolver) (AlphabetContracts, error) {
num := GlagoliticLetter(cfg.GetUint("contracts.alphabet.amount"))
- alpha := newAlphabetContracts()
+ alpha := NewAlphabetContracts()
if num > lastLetterNum {
return nil, fmt.Errorf("amount of alphabet contracts overflows glagolitsa %d > %d", num, lastLetterNum)
@@ -121,7 +113,7 @@ func parseAlphabetContracts(cfg *viper.Viper, morph *client.Client) (alphabetCon
return alpha, nil
}
-func parseContract(cfg *viper.Viper, morph *client.Client, cfgName, nnsName string) (res util.Uint160, err error) {
+func parseContract(cfg *viper.Viper, morph nnsResolver, cfgName, nnsName string) (res util.Uint160, err error) {
contractStr := cfg.GetString(cfgName)
if len(contractStr) == 0 {
return morph.NNSContractAddress(nnsName)
@@ -129,3 +121,7 @@ func parseContract(cfg *viper.Viper, morph *client.Client, cfgName, nnsName stri
return util.Uint160DecodeStringLE(contractStr)
}
+
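+// nnsResolver resolves an NNS contract name to its script hash.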
+type nnsResolver interface {
+ NNSContractAddress(name string) (sh util.Uint160, err error)
+}
diff --git a/pkg/innerring/contracts_test.go b/pkg/innerring/contracts_test.go
new file mode 100644
index 0000000000..0fb0442b2b
--- /dev/null
+++ b/pkg/innerring/contracts_test.go
@@ -0,0 +1,212 @@
+package innerring
+
+import (
+ "strings"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/spf13/viper"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseContractsSuccess(t *testing.T) {
+ t.Parallel()
+ file := strings.NewReader(`
+contracts:
+ frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62
+ processing: 597f5894867113a41e192801709c02497f611de8
+ balance: d2aa48d14b17b11bc4c68205027884a96706dd16
+ container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6
+ frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a
+ netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742
+ proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c
+ alphabet:
+ amount: 2
+ az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea
+ buky: e2ba789320899658b100f331bdebb74474757920
+`)
+
+ v := viper.New()
+ v.SetConfigType("yaml")
+ err := v.ReadConfig(file)
+ require.NoError(t, err, "read config file failed")
+
+ t.Run("all enabled", func(t *testing.T) {
+ t.Parallel()
+ c, err := parseContracts(v, nil, false, false)
+ require.NoError(t, err, "failed to parse contracts")
+
+ frostfsExp, _ := util.Uint160DecodeStringLE("ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62")
+ require.Equal(t, frostfsExp, c.frostfs, "invalid frostfs")
+
+ processingExp, _ := util.Uint160DecodeStringLE("597f5894867113a41e192801709c02497f611de8")
+ require.Equal(t, processingExp, c.processing, "invalid processing")
+
+ balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16")
+ require.Equal(t, balanceExp, c.balance, "invalid balance")
+
+ containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6")
+ require.Equal(t, containerExp, c.container, "invalid container")
+
+ frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a")
+ require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID")
+
+ netmapIDExp, _ := util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742")
+ require.Equal(t, netmapIDExp, c.netmap, "invalid netmap")
+
+ proxyExp, _ := util.Uint160DecodeStringLE("abc8794bb40a21f2db5f21ae62741eb46c8cad1c")
+ require.Equal(t, proxyExp, c.proxy, "invalid proxy")
+
+ require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length")
+
+ azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea")
+ require.Equal(t, azExp, c.alphabet[az], "invalid az")
+
+ bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920")
+ require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky")
+ })
+
+ t.Run("all disabled", func(t *testing.T) {
+ t.Parallel()
+ c, err := parseContracts(v, nil, true, true)
+ require.NoError(t, err, "failed to parse contracts")
+
+ require.Equal(t, util.Uint160{}, c.frostfs, "invalid frostfs")
+
+ require.Equal(t, util.Uint160{}, c.processing, "invalid processing")
+
+ balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16")
+ require.Equal(t, balanceExp, c.balance, "invalid balance")
+
+ containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6")
+ require.Equal(t, containerExp, c.container, "invalid container")
+
+ frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a")
+ require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID")
+
+ netmapIDExp, _ := util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742")
+ require.Equal(t, netmapIDExp, c.netmap, "invalid netmap")
+
+ proxyExp, _ := util.Uint160DecodeStringLE("abc8794bb40a21f2db5f21ae62741eb46c8cad1c")
+ require.Equal(t, proxyExp, c.proxy, "invalid proxy")
+
+ require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length")
+
+ azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea")
+ require.Equal(t, azExp, c.alphabet[az], "invalid az")
+
+ bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920")
+ require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky")
+ })
+
+ t.Run("main notary disabled", func(t *testing.T) {
+ t.Parallel()
+ c, err := parseContracts(v, nil, false, true)
+ require.NoError(t, err, "failed to parse contracts")
+
+ frostfsExp, _ := util.Uint160DecodeStringLE("ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62")
+ require.Equal(t, frostfsExp, c.frostfs, "invalid frostfs")
+
+ require.Equal(t, util.Uint160{}, c.processing, "invalid processing")
+
+ balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16")
+ require.Equal(t, balanceExp, c.balance, "invalid balance")
+
+ containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6")
+ require.Equal(t, containerExp, c.container, "invalid container")
+
+ frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a")
+ require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID")
+
+ netmapIDExp, _ := util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742")
+ require.Equal(t, netmapIDExp, c.netmap, "invalid netmap")
+
+ proxyExp, _ := util.Uint160DecodeStringLE("abc8794bb40a21f2db5f21ae62741eb46c8cad1c")
+ require.Equal(t, proxyExp, c.proxy, "invalid proxy")
+
+ require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length")
+
+ azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea")
+ require.Equal(t, azExp, c.alphabet[az], "invalid az")
+
+ bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920")
+ require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky")
+ })
+}
+
+func TestParseContractsInvalid(t *testing.T) {
+ t.Parallel()
+ t.Run("invalid frostfs contract", func(t *testing.T) {
+ t.Parallel()
+ file := strings.NewReader(`
+contracts:
+ frostfs: invalid_data
+ processing: 597f5894867113a41e192801709c02497f611de8
+ balance: d2aa48d14b17b11bc4c68205027884a96706dd16
+ container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6
+ frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a
+ netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742
+ proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c
+ alphabet:
+ amount: 2
+ az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea
+ buky: e2ba789320899658b100f331bdebb74474757920
+`)
+
+ v := viper.New()
+ v.SetConfigType("yaml")
+ err := v.ReadConfig(file)
+ require.NoError(t, err, "read config file failed")
+
+ _, err = parseContracts(v, nil, false, false)
+ require.Error(t, err, "unexpected success")
+ })
+
+ t.Run("invalid alphabet count", func(t *testing.T) {
+ t.Parallel()
+ file := strings.NewReader(`
+contracts:
+ frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62
+ processing: 597f5894867113a41e192801709c02497f611de8
+ balance: d2aa48d14b17b11bc4c68205027884a96706dd16
+ container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6
+ frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a
+ netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742
+ proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c
+ alphabet:
+ amount: 3
+ az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea
+ buky: e2ba789320899658b100f331bdebb74474757920
+`)
+
+ v := viper.New()
+ v.SetConfigType("yaml")
+ err := v.ReadConfig(file)
+ require.NoError(t, err, "read config file failed")
+
+ azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea")
+ bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920")
+
+ morph := &testParserMorph{
+ values: map[string]util.Uint160{
+ "az": azExp,
+ "buky": bukyExp,
+ },
+ }
+
+ _, err = parseContracts(v, morph, false, false)
+ require.ErrorContains(t, err, "could not read all contracts: required 3, read 2", "unexpected success")
+ })
+}
+
+type testParserMorph struct {
+ values map[string]util.Uint160
+}
+
+func (m *testParserMorph) NNSContractAddress(name string) (sh util.Uint160, err error) {
+ if value, found := m.values[name]; found {
+ return value, nil
+ }
+ return util.Uint160{}, client.ErrNNSRecordNotFound
+}
diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go
index 4a80ebf3bf..7deec3f317 100644
--- a/pkg/innerring/fetcher.go
+++ b/pkg/innerring/fetcher.go
@@ -1,6 +1,8 @@
package innerring
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -47,12 +49,12 @@ type IrFetcherWithoutNotary struct {
// InnerRingKeys fetches list of innerring keys from NeoFSAlphabet
// role in the sidechain.
-func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) {
- return fN.cli.NeoFSAlphabetList()
+func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
+ return fN.cli.NeoFSAlphabetList(ctx)
}
// InnerRingKeys fetches list of innerring keys from netmap contract
// in the sidechain.
-func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) {
- return f.nm.GetInnerRingList()
+func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
+ return f.nm.GetInnerRingList(ctx)
}
diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go
index ac5fb93efb..439400baca 100644
--- a/pkg/innerring/indexer.go
+++ b/pkg/innerring/indexer.go
@@ -1,7 +1,7 @@
package innerring
import (
- "bytes"
+ "context"
"fmt"
"sync"
"time"
@@ -11,7 +11,7 @@ import (
type (
irFetcher interface {
- InnerRingKeys() (keys.PublicKeys, error)
+ InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
}
committeeFetcher interface {
@@ -46,7 +46,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK
}
}
-func (s *innerRingIndexer) update() (ind indexes, err error) {
+func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) {
s.RLock()
if time.Since(s.lastAccess) < s.timeout {
@@ -63,7 +63,7 @@ func (s *innerRingIndexer) update() (ind indexes, err error) {
return s.ind, nil
}
- innerRing, err := s.irFetcher.InnerRingKeys()
+ innerRing, err := s.irFetcher.InnerRingKeys(ctx)
if err != nil {
return indexes{}, err
}
@@ -82,8 +82,8 @@ func (s *innerRingIndexer) update() (ind indexes, err error) {
return s.ind, nil
}
-func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
- ind, err := s.update()
+func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
+ ind, err := s.update(ctx)
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -91,8 +91,8 @@ func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
return ind.innerRingIndex, nil
}
-func (s *innerRingIndexer) InnerRingSize() (int32, error) {
- ind, err := s.update()
+func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
+ ind, err := s.update(ctx)
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -100,8 +100,8 @@ func (s *innerRingIndexer) InnerRingSize() (int32, error) {
return ind.innerRingSize, nil
}
-func (s *innerRingIndexer) AlphabetIndex() (int32, error) {
- ind, err := s.update()
+func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) {
+ ind, err := s.update(ctx)
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -111,16 +111,11 @@ func (s *innerRingIndexer) AlphabetIndex() (int32, error) {
// keyPosition returns "-1" if key is not found in the list, otherwise returns
// index of the key.
-func keyPosition(key *keys.PublicKey, list keys.PublicKeys) (result int32) {
- result = -1
- rawBytes := key.Bytes()
-
+func keyPosition(key *keys.PublicKey, list keys.PublicKeys) int32 {
for i := range list {
- if bytes.Equal(list[i].Bytes(), rawBytes) {
- result = int32(i)
- break
+ if key.Equal(list[i]) {
+ return int32(i)
}
}
-
- return result
+ return -1
}
diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go
new file mode 100644
index 0000000000..f8201b7df4
--- /dev/null
+++ b/pkg/innerring/indexer_test.go
@@ -0,0 +1,246 @@
+package innerring
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIndexerReturnsIndexes(t *testing.T) {
+ t.Parallel()
+	committeeKeys, err := keys.NewPublicKeysFromStrings([]string{
+ "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae",
+ "022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131",
+ })
+	require.NoError(t, err, "convert string to committee public keys failed")
+	cf := &testCommitteeFetcher{
+		keys: committeeKeys,
+ }
+
+ irKeys, err := keys.NewPublicKeysFromStrings([]string{
+ "038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35",
+ "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3",
+ "022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131",
+ })
+ require.NoError(t, err, "convert string to IR public keys failed")
+ irf := &testIRFetcher{
+ keys: irKeys,
+ }
+
+ t.Run("success", func(t *testing.T) {
+ t.Parallel()
+ key := irKeys[2]
+
+ indexer := newInnerRingIndexer(cf, irf, key, time.Second)
+
+ idx, err := indexer.AlphabetIndex(context.Background())
+ require.NoError(t, err, "failed to get alphabet index")
+ require.Equal(t, int32(1), idx, "invalid alphabet index")
+
+ idx, err = indexer.InnerRingIndex(context.Background())
+ require.NoError(t, err, "failed to get IR index")
+ require.Equal(t, int32(2), idx, "invalid IR index")
+
+ size, err := indexer.InnerRingSize(context.Background())
+ require.NoError(t, err, "failed to get IR size")
+ require.Equal(t, int32(3), size, "invalid IR size")
+ })
+
+ t.Run("not found alphabet", func(t *testing.T) {
+ t.Parallel()
+ key := irKeys[0]
+
+ indexer := newInnerRingIndexer(cf, irf, key, time.Second)
+
+ idx, err := indexer.AlphabetIndex(context.Background())
+ require.NoError(t, err, "failed to get alphabet index")
+ require.Equal(t, int32(-1), idx, "invalid alphabet index")
+
+ idx, err = indexer.InnerRingIndex(context.Background())
+ require.NoError(t, err, "failed to get IR index")
+ require.Equal(t, int32(0), idx, "invalid IR index")
+ })
+
+ t.Run("not found IR", func(t *testing.T) {
+ t.Parallel()
+		key := committeeKeys[0]
+
+ indexer := newInnerRingIndexer(cf, irf, key, time.Second)
+
+ idx, err := indexer.AlphabetIndex(context.Background())
+ require.NoError(t, err, "failed to get alphabet index")
+ require.Equal(t, int32(0), idx, "invalid alphabet index")
+
+ idx, err = indexer.InnerRingIndex(context.Background())
+ require.NoError(t, err, "failed to get IR index")
+ require.Equal(t, int32(-1), idx, "invalid IR index")
+ })
+}
+
+func TestIndexerCachesIndexes(t *testing.T) {
+ t.Parallel()
+	committeeKeys, err := keys.NewPublicKeysFromStrings([]string{})
+	require.NoError(t, err, "convert string to committee public keys failed")
+	cf := &testCommitteeFetcher{
+		keys: committeeKeys,
+ }
+
+ irKeys, err := keys.NewPublicKeysFromStrings([]string{})
+ require.NoError(t, err, "convert string to IR public keys failed")
+ irf := &testIRFetcher{
+ keys: irKeys,
+ }
+
+ key, err := keys.NewPublicKeyFromString("022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131")
+ require.NoError(t, err, "convert string to public key failed")
+
+ indexer := newInnerRingIndexer(cf, irf, key, time.Second)
+
+ idx, err := indexer.AlphabetIndex(context.Background())
+ require.NoError(t, err, "failed to get alphabet index")
+ require.Equal(t, int32(-1), idx, "invalid alphabet index")
+
+ idx, err = indexer.InnerRingIndex(context.Background())
+ require.NoError(t, err, "failed to get IR index")
+ require.Equal(t, int32(-1), idx, "invalid IR index")
+
+ size, err := indexer.InnerRingSize(context.Background())
+ require.NoError(t, err, "failed to get IR size")
+ require.Equal(t, int32(0), size, "invalid IR size")
+
+	require.Equal(t, int32(1), cf.calls.Load(), "invalid committee calls count")
+ require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count")
+
+ idx, err = indexer.AlphabetIndex(context.Background())
+ require.NoError(t, err, "failed to get alphabet index")
+ require.Equal(t, int32(-1), idx, "invalid alphabet index")
+
+ idx, err = indexer.InnerRingIndex(context.Background())
+ require.NoError(t, err, "failed to get IR index")
+ require.Equal(t, int32(-1), idx, "invalid IR index")
+
+ size, err = indexer.InnerRingSize(context.Background())
+ require.NoError(t, err, "failed to get IR size")
+ require.Equal(t, int32(0), size, "invalid IR size")
+
+	require.Equal(t, int32(1), cf.calls.Load(), "invalid committee calls count")
+ require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count")
+
+ time.Sleep(2 * time.Second)
+
+ idx, err = indexer.AlphabetIndex(context.Background())
+ require.NoError(t, err, "failed to get alphabet index")
+ require.Equal(t, int32(-1), idx, "invalid alphabet index")
+
+ idx, err = indexer.InnerRingIndex(context.Background())
+ require.NoError(t, err, "failed to get IR index")
+ require.Equal(t, int32(-1), idx, "invalid IR index")
+
+ size, err = indexer.InnerRingSize(context.Background())
+ require.NoError(t, err, "failed to get IR size")
+ require.Equal(t, int32(0), size, "invalid IR size")
+
+	require.Equal(t, int32(2), cf.calls.Load(), "invalid committee calls count")
+ require.Equal(t, int32(2), irf.calls.Load(), "invalid IR calls count")
+}
+
+func TestIndexerThrowsErrors(t *testing.T) {
+ t.Parallel()
+	cf := &testCommitteeFetcher{
+		err: fmt.Errorf("test committee error"),
+ }
+
+ irKeys, err := keys.NewPublicKeysFromStrings([]string{})
+ require.NoError(t, err, "convert string to IR public keys failed")
+ irf := &testIRFetcher{
+ keys: irKeys,
+ }
+
+ key, err := keys.NewPublicKeyFromString("022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131")
+ require.NoError(t, err, "convert string to public key failed")
+
+ indexer := newInnerRingIndexer(cf, irf, key, time.Second)
+
+ idx, err := indexer.AlphabetIndex(context.Background())
+	require.ErrorContains(t, err, "test committee error", "error from committee not thrown")
+	require.Equal(t, int32(0), idx, "invalid alphabet index")
+
+	idx, err = indexer.InnerRingIndex(context.Background())
+	require.ErrorContains(t, err, "test committee error", "error from committee not thrown")
+	require.Equal(t, int32(0), idx, "invalid IR index")
+
+	size, err := indexer.InnerRingSize(context.Background())
+	require.ErrorContains(t, err, "test committee error", "error from committee not thrown")
+ require.Equal(t, int32(0), size, "invalid IR size")
+
+	committeeKeys, err := keys.NewPublicKeysFromStrings([]string{})
+	require.NoError(t, err, "convert string to committee public keys failed")
+	cf = &testCommitteeFetcher{
+		keys: committeeKeys,
+ }
+
+ irf = &testIRFetcher{
+ err: fmt.Errorf("test IR error"),
+ }
+
+ indexer = newInnerRingIndexer(cf, irf, key, time.Second)
+
+ idx, err = indexer.AlphabetIndex(context.Background())
+	require.ErrorContains(t, err, "test IR error", "error from IR not thrown")
+	require.Equal(t, int32(0), idx, "invalid alphabet index")
+
+	idx, err = indexer.InnerRingIndex(context.Background())
+	require.ErrorContains(t, err, "test IR error", "error from IR not thrown")
+	require.Equal(t, int32(0), idx, "invalid IR index")
+
+	size, err = indexer.InnerRingSize(context.Background())
+	require.ErrorContains(t, err, "test IR error", "error from IR not thrown")
+ require.Equal(t, int32(0), size, "invalid IR size")
+}
+
+type testCommitteeFetcher struct {
+ keys keys.PublicKeys
+ err error
+ calls atomic.Int32
+}
+
+func (f *testCommitteeFetcher) Committee() (keys.PublicKeys, error) {
+ f.calls.Add(1)
+ return f.keys, f.err
+}
+
+type testIRFetcher struct {
+ keys keys.PublicKeys
+ err error
+ calls atomic.Int32
+}
+
+func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+ f.calls.Add(1)
+ return f.keys, f.err
+}
+
+func BenchmarkKeyPosition(b *testing.B) {
+ list := make(keys.PublicKeys, 7)
+ for i := range list {
+ p, err := keys.NewPrivateKey()
+ require.NoError(b, err)
+ list[i] = p.PublicKey()
+ }
+
+ key := new(keys.PublicKey)
+ require.NoError(b, key.DecodeBytes(list[5].Bytes()))
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for range b.N {
+ if keyPosition(key, list) != 5 {
+ b.FailNow()
+ }
+ }
+}
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
new file mode 100644
index 0000000000..3d236641e4
--- /dev/null
+++ b/pkg/innerring/initialization.go
@@ -0,0 +1,513 @@
+package innerring
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "net"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance"
+ cont "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
+ nodevalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation"
+ addrvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/maddress"
+ statevalidation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ frostfsClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
+ nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ controlsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
+ utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+)
+
+func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
+ alphaSync event.Handler,
+) error {
+ locodeValidator := s.newLocodeValidator(cfg)
+
+ netSettings := (*networkSettings)(s.netmapClient)
+
+ var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
+ netMapCandidateStateValidator.SetNetworkSettings(netSettings)
+
+ poolSize := cfg.GetInt("workers.netmap")
+ s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
+
+ var err error
+ s.netmapProcessor, err = netmap.New(&netmap.Params{
+ Log: s.log.WithTag(logger.TagProcessor),
+ Metrics: s.irMetrics,
+ PoolSize: poolSize,
+ NetmapClient: netmap.NewNetmapClient(s.netmapClient),
+ EpochTimer: s,
+ EpochState: s,
+ AlphabetState: s,
+ CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"),
+ CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"),
+ NotaryDepositHandler: s.onlyAlphabetEventHandler(
+ s.notaryHandler,
+ ),
+ AlphabetSyncHandler: s.onlyAlphabetEventHandler(
+ alphaSync,
+ ),
+ NodeValidator: nodevalidator.New(
+ &netMapCandidateStateValidator,
+ addrvalidator.New(),
+ locodeValidator,
+ ),
+
+ NodeStateSettings: netSettings,
+ })
+ if err != nil {
+ return err
+ }
+
+ return bindMorphProcessor(s.netmapProcessor, s)
+}
+
+func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *chainParams, errChan chan<- error) error {
+ s.withoutMainNet = cfg.GetBool("without_mainnet")
+ if s.withoutMainNet {
+ // This works as long as event Listener starts listening loop once,
+ // otherwise Server.Start will run two similar routines.
+ // This behavior most likely will not change.
+ s.mainnetListener = s.morphListener
+ s.mainnetClient = s.morphClient
+ return nil
+ }
+
+ mainnetChain := morphChain
+ mainnetChain.name = mainnetPrefix
+ mainnetChain.sgn = &transaction.Signer{Scopes: transaction.CalledByEntry}
+
+ fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
+ if err != nil {
+ fromMainChainBlock = 0
+ s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
+ }
+ mainnetChain.from = fromMainChainBlock
+
+ // create mainnet client
+ s.mainnetClient, err = createClient(ctx, mainnetChain, errChan)
+ if err != nil {
+ return err
+ }
+
+ // create mainnet listener
+ s.mainnetListener, err = createListener(ctx, s.mainnetClient, mainnetChain)
+ return err
+}
+
+func (s *Server) enableNotarySupport() error {
+ // enable notary support in the side client
+ err := s.morphClient.EnableNotarySupport(
+ client.WithProxyContract(s.contracts.proxy),
+ )
+ if err != nil {
+ return fmt.Errorf("could not enable side chain notary support: %w", err)
+ }
+
+ s.morphListener.EnableNotarySupport(s.contracts.proxy, s.morphClient.Committee, s.morphClient)
+
+ if !s.mainNotaryConfig.disabled {
+ // enable notary support in the main client
+ err := s.mainnetClient.EnableNotarySupport(
+ client.WithProxyContract(s.contracts.processing),
+ client.WithAlphabetSource(s.morphClient.Committee),
+ )
+ if err != nil {
+ return fmt.Errorf("could not enable main chain notary support: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func (s *Server) initNotaryConfig(ctx context.Context) {
+ s.mainNotaryConfig = notaryConfigs(
+ !s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
+ )
+
+ s.log.Info(ctx, logs.InnerringNotarySupport,
+ zap.Bool("sidechain_enabled", true),
+ zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
+ )
+}
+
+func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Client, irf irFetcher) (event.Handler, error) {
+ var alphaSync event.Handler
+
+ if s.withoutMainNet || cfg.GetBool("governance.disable") {
+ alphaSync = func(ctx context.Context, _ event.Event) {
+ s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled)
+ }
+ } else {
+ // create governance processor
+ governanceProcessor, err := governance.New(&governance.Params{
+ Log: s.log.WithTag(logger.TagProcessor),
+ Metrics: s.irMetrics,
+ FrostFSClient: frostfsCli,
+ AlphabetState: s,
+ EpochState: s,
+ Voter: s,
+ IRFetcher: irf,
+ MorphClient: s.morphClient,
+ MainnetClient: s.mainnetClient,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ alphaSync = governanceProcessor.HandleAlphabetSync
+ err = bindMainnetProcessor(governanceProcessor, s)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return alphaSync, nil
+}
+
+func (s *Server) createIRFetcher() irFetcher {
+ var irf irFetcher
+
+ if s.withoutMainNet || !s.mainNotaryConfig.disabled {
+		// if the mainchain is disabled, the NeoFSAlphabetList client method must be
+		// used according to its docs (the `...WithNotary` naming is not always literal)
+ irf = NewIRFetcherWithNotary(s.morphClient)
+ } else {
+ irf = NewIRFetcherWithoutNotary(s.netmapClient)
+ }
+
+ return irf
+}
+
+func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
+ s.epochTimer = newEpochTimer(&epochTimerArgs{
+ newEpochHandlers: s.newEpochTickHandlers(ctx),
+ epoch: s,
+ })
+
+ s.addBlockTimer(s.epochTimer)
+
+ // initialize emission timer
+ emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{
+ ap: s.alphabetProcessor,
+ emitDuration: cfg.GetUint32("timers.emit"),
+ })
+
+ s.addBlockTimer(emissionTimer)
+}
+
+func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error {
+ parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
+ if err != nil {
+ return err
+ }
+ poolSize := cfg.GetInt("workers.alphabet")
+ s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize))
+
+ // create alphabet processor
+ s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
+ ParsedWallets: parsedWallets,
+ Log: s.log.WithTag(logger.TagProcessor),
+ Metrics: s.irMetrics,
+ PoolSize: poolSize,
+ AlphabetContracts: s.contracts.alphabet,
+ NetmapClient: s.netmapClient,
+ MorphClient: s.morphClient,
+ IRList: s,
+ StorageEmission: cfg.GetUint64("emit.storage.amount"),
+ })
+ if err != nil {
+ return err
+ }
+
+ err = bindMorphProcessor(s.alphabetProcessor, s)
+ return err
+}
+
+func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
+ poolSize := cfg.GetInt("workers.container")
+ s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
+ // container processor
+ containerProcessor, err := cont.New(&cont.Params{
+ Log: s.log.WithTag(logger.TagProcessor),
+ Metrics: s.irMetrics,
+ PoolSize: poolSize,
+ AlphabetState: s,
+ ContainerClient: cnrClient,
+ MorphClient: cnrClient.Morph(),
+ FrostFSIDClient: frostfsIDClient,
+ NetworkState: s.netmapClient,
+ })
+ if err != nil {
+ return err
+ }
+
+ return bindMorphProcessor(containerProcessor, s)
+}
+
+func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
+ poolSize := cfg.GetInt("workers.balance")
+ s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
+ // create balance processor
+ balanceProcessor, err := balance.New(&balance.Params{
+ Log: s.log.WithTag(logger.TagProcessor),
+ Metrics: s.irMetrics,
+ PoolSize: poolSize,
+ FrostFSClient: frostfsCli,
+ BalanceSC: s.contracts.balance,
+ AlphabetState: s,
+ Converter: &s.precision,
+ })
+ if err != nil {
+ return err
+ }
+
+ return bindMorphProcessor(balanceProcessor, s)
+}
+
+func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error {
+ if s.withoutMainNet {
+ return nil
+ }
+ poolSize := cfg.GetInt("workers.frostfs")
+ s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
+
+ frostfsProcessor, err := frostfs.New(&frostfs.Params{
+ Log: s.log.WithTag(logger.TagProcessor),
+ Metrics: s.irMetrics,
+ PoolSize: poolSize,
+ FrostFSContract: s.contracts.frostfs,
+ BalanceClient: s.balanceClient,
+ NetmapClient: s.netmapClient,
+ MorphClient: s.morphClient,
+ EpochState: s,
+ AlphabetState: s,
+ Converter: &s.precision,
+ MintEmitCacheSize: cfg.GetInt("emit.mint.cache_size"),
+ MintEmitThreshold: cfg.GetUint64("emit.mint.threshold"),
+ MintEmitValue: fixedn.Fixed8(cfg.GetInt64("emit.mint.value")),
+ GasBalanceThreshold: cfg.GetInt64("emit.gas.balance_threshold"),
+ })
+ if err != nil {
+ return err
+ }
+
+ return bindMainnetProcessor(frostfsProcessor, s)
+}
+
+func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
+ controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
+ if controlSvcEndpoint == "" {
+ s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified)
+ return nil
+ }
+
+ authKeysStr := cfg.GetStringSlice("control.authorized_keys")
+ authKeys := make([][]byte, 0, len(authKeysStr))
+
+ for i := range authKeysStr {
+ key, err := hex.DecodeString(authKeysStr[i])
+ if err != nil {
+ return fmt.Errorf("could not parse Control authorized key %s: %w",
+ authKeysStr[i],
+ err,
+ )
+ }
+
+ authKeys = append(authKeys, key)
+ }
+
+ var p controlsrv.Prm
+
+ p.SetPrivateKey(*s.key)
+ p.SetHealthChecker(s)
+
+ controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient,
+ controlsrv.WithAllowedKeys(authKeys),
+ ), log.WithTag(logger.TagGrpcSvc), audit)
+
+ grpcControlSrv := grpc.NewServer()
+ control.RegisterControlServiceServer(grpcControlSrv, controlSvc)
+
+ s.runners = append(s.runners, func(ch chan<- error) error {
+ lis, err := net.Listen("tcp", controlSvcEndpoint)
+ if err != nil {
+ return err
+ }
+
+ go func() {
+ ch <- grpcControlSrv.Serve(lis)
+ }()
+ return nil
+ })
+
+ s.registerNoErrCloser(grpcControlSrv.GracefulStop)
+ return nil
+}
+
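+// serverMorphClients groups the morph contract clients created during server initialization.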
+type serverMorphClients struct {
+ CnrClient *container.Client
+ FrostFSIDClient *frostfsid.Client
+ FrostFSClient *frostfsClient.Client
+}
+
+func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
+ result := &serverMorphClients{}
+ var err error
+
+ fee := s.feeConfig.SideChainFee()
+
+ // form morph container client's options
+ morphCnrOpts := make([]container.Option, 0, 3)
+ morphCnrOpts = append(morphCnrOpts,
+ container.AsAlphabet(),
+ )
+
+ result.CnrClient, err = container.NewFromMorph(s.morphClient, s.contracts.container, fee, morphCnrOpts...)
+ if err != nil {
+ return nil, err
+ }
+ s.containerClient = result.CnrClient
+
+ s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet())
+ if err != nil {
+ return nil, err
+ }
+
+ s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet())
+ if err != nil {
+ return nil, err
+ }
+
+ result.FrostFSIDClient, err = frostfsid.NewFromMorph(s.morphClient, s.contracts.frostfsID, fee)
+ if err != nil {
+ return nil, err
+ }
+
+ result.FrostFSClient, err = frostfsClient.NewFromMorph(s.mainnetClient, s.contracts.frostfs,
+ s.feeConfig.MainChainFee(), frostfsClient.TryNotary(), frostfsClient.AsAlphabet())
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error {
+ irf := s.createIRFetcher()
+
+ s.statusIndex = newInnerRingIndexer(
+ s.morphClient,
+ irf,
+ s.key.PublicKey(),
+ cfg.GetDuration("indexer.cache_timeout"),
+ )
+
+ alphaSync, err := s.createAlphaSync(cfg, morphClients.FrostFSClient, irf)
+ if err != nil {
+ return err
+ }
+
+ err = s.initNetmapProcessor(ctx, cfg, alphaSync)
+ if err != nil {
+ return err
+ }
+
+ err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
+ if err != nil {
+ return err
+ }
+
+ err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient)
+ if err != nil {
+ return err
+ }
+
+ err = s.initFrostFSMainnetProcessor(ctx, cfg)
+ if err != nil {
+ return err
+ }
+
+ err = s.initAlphabetProcessor(ctx, cfg)
+ return err
+}
+
+func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- error) (*chainParams, error) {
+ fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
+ if err != nil {
+ fromSideChainBlock = 0
+ s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+ }
+
+ morphChain := &chainParams{
+ log: s.log.WithTag(logger.TagMorph),
+ cfg: cfg,
+ key: s.key,
+ name: morphPrefix,
+ from: fromSideChainBlock,
+ morphCacheMetric: s.irMetrics.MorphCacheMetrics(),
+ multinetMetrics: s.irMetrics.Multinet(),
+ }
+
+ // create morph client
+ s.morphClient, err = createClient(ctx, morphChain, errChan)
+ if err != nil {
+ return nil, err
+ }
+
+ // create morph listener
+ s.morphListener, err = createListener(ctx, s.morphClient, morphChain)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.morphClient.SetGroupSignerScope(); err != nil {
+ morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
+ }
+
+ return morphChain, nil
+}
+
+func (s *Server) initContracts(cfg *viper.Viper) error {
+ var err error
+ // get all script hashes of contracts
+ s.contracts, err = parseContracts(
+ cfg,
+ s.morphClient,
+ s.withoutMainNet,
+ s.mainNotaryConfig.disabled,
+ )
+
+ return err
+}
+
+func (s *Server) initKey(cfg *viper.Viper) error {
+ // prepare inner ring node private key
+ acc, err := utilConfig.LoadAccount(
+ cfg.GetString("wallet.path"),
+ cfg.GetString("wallet.address"),
+ cfg.GetString("wallet.password"))
+ if err != nil {
+ return fmt.Errorf("ir: %w", err)
+ }
+
+ s.key = acc.PrivateKey()
+ return nil
+}
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 33cfc39a65..3a51372618 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -2,61 +2,38 @@ package innerring
import (
"context"
- "encoding/hex"
"errors"
"fmt"
"io"
- "net"
+ "sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/container"
- frostfs "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
- nodevalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation"
- addrvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/maddress"
- statevalidation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
- subnetvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/subnet"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
- auditSettlement "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit"
balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- frostfsClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
- morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
- audittask "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/taskmanager"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- controlsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- util2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/precision"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/panjf2000/ants/v2"
"github.com/spf13/viper"
- "go.uber.org/atomic"
"go.uber.org/zap"
- "google.golang.org/grpc"
)
type (
@@ -72,36 +49,36 @@ type (
epochTimer *timer.BlockTimer
// global state
- morphClient *client.Client
- mainnetClient *client.Client
- epochCounter atomic.Uint64
- epochDuration atomic.Uint64
- statusIndex *innerRingIndexer
- precision precision.Fixed8Converter
- auditClient *auditClient.Client
- healthStatus atomic.Value
- balanceClient *balanceClient.Client
- netmapClient *nmClient.Client
- persistate *state.PersistentStorage
+ morphClient *client.Client
+ mainnetClient *client.Client
+ epochCounter atomic.Uint64
+ epochDuration atomic.Uint64
+ statusIndex *innerRingIndexer
+ precision precision.Fixed8Converter
+ healthStatus atomic.Int32
+ balanceClient *balanceClient.Client
+ netmapClient *nmClient.Client
+ persistate *state.PersistentStorage
+ containerClient *container.Client
// metrics
- metrics *metrics.InnerRingServiceMetrics
+ irMetrics *metrics.InnerRingServiceMetrics
// notary configuration
feeConfig *config.FeeConfig
mainNotaryConfig *notaryConfig
- sideNotaryConfig *notaryConfig
// internal variables
key *keys.PrivateKey
- pubKey []byte
contracts *contracts
predefinedValidators keys.PublicKeys
initialEpochTickDelta uint32
withoutMainNet bool
+ sdNotify bool
// runtime processors
- netmapProcessor *netmap.Processor
+ netmapProcessor *netmap.Processor
+ alphabetProcessor *alphabet.Processor
workers []func(context.Context)
@@ -127,16 +104,20 @@ type (
// to the application.
runners []func(chan<- error) error
- subnetHandler
+ // cmode is used for the upgrade scenario.
+ // nolint:unused
+ cmode *atomic.Bool
}
chainParams struct {
- log *logger.Logger
- cfg *viper.Viper
- key *keys.PrivateKey
- name string
- sgn *transaction.Signer
- from uint32 // block height
+ log *logger.Logger
+ cfg *viper.Viper
+ key *keys.PrivateKey
+ name string
+ sgn *transaction.Signer
+ from uint32 // block height
+ morphCacheMetric metrics.MorphCacheMetrics
+ multinetMetrics metrics.MultinetMetrics
}
)
@@ -159,41 +140,30 @@ var (
// Start runs all event providers.
func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
- s.setHealthStatus(control.HealthStatus_STARTING)
+ s.setHealthStatus(ctx, control.HealthStatus_STARTING)
defer func() {
if err == nil {
- s.setHealthStatus(control.HealthStatus_READY)
+ s.setHealthStatus(ctx, control.HealthStatus_READY)
}
}()
- for _, starter := range s.starters {
- if err := starter(); err != nil {
- return err
- }
- }
-
- err = s.initConfigFromBlockchain()
+ err = s.launchStarters()
if err != nil {
return err
}
- if !s.mainNotaryConfig.disabled {
- err = s.initNotary(ctx,
- s.depositMainNotary,
- s.awaitMainNotaryDeposit,
- "waiting to accept main notary deposit",
- )
+ err = s.initConfigFromBlockchain(ctx)
+ if err != nil {
+ return err
+ }
+
+ if s.IsAlphabet(ctx) {
+ err = s.initMainNotary(ctx)
if err != nil {
return err
}
- }
- if !s.sideNotaryConfig.disabled {
- err = s.initNotary(ctx,
- s.depositSideNotary,
- s.awaitSideNotaryDeposit,
- "waiting to accept side notary deposit",
- )
+ err = s.initSideNotary(ctx)
if err != nil {
return err
}
@@ -203,20 +173,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
prm.Validators = s.predefinedValidators
// vote for sidechain validator if it is prepared in config
- err = s.voteForSidechainValidator(prm)
+ err = s.voteForSidechainValidator(ctx, prm)
if err != nil {
// we don't stop inner ring execution on this error
- s.log.Warn("can't vote for prepared validators",
- zap.String("error", err.Error()))
+ s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
+ zap.Error(err))
}
- // tick initial epoch
- initialEpochTicker := timer.NewOneTickTimer(
- timer.StaticBlockMeter(s.initialEpochTickDelta),
- func() {
- s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
- })
- s.addBlockTimer(initialEpochTicker)
+ s.tickInitialEpoch(ctx)
morphErr := make(chan error)
mainnnetErr := make(chan error)
@@ -233,36 +197,11 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
}
}()
- s.morphListener.RegisterBlockHandler(func(b *block.Block) {
- s.log.Debug("new block",
- zap.Uint32("index", b.Index),
- )
+ s.registerMorphNewBlockEventHandler()
+ s.registerMainnetNewBlockEventHandler()
- err = s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
- if err != nil {
- s.log.Warn("can't update persistent state",
- zap.String("chain", "side"),
- zap.Uint32("block_index", b.Index))
- }
-
- s.tickTimers(b.Index)
- })
-
- if !s.withoutMainNet {
- s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
- err = s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
- if err != nil {
- s.log.Warn("can't update persistent state",
- zap.String("chain", "main"),
- zap.Uint32("block_index", b.Index))
- }
- })
- }
-
- for _, runner := range s.runners {
- if err := runner(intError); err != nil {
- return err
- }
+ if err := s.startRunners(intError); err != nil {
+ return err
}
go s.morphListener.ListenWithError(ctx, morphErr) // listen for neo:morph events
@@ -277,6 +216,82 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
return nil
}
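+// registerMorphNewBlockEventHandler persists the processed side chain height
+// and ticks the block timers on every new morph block.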
+func (s *Server) registerMorphNewBlockEventHandler() {
+ s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
+ s.log.Debug(ctx, logs.InnerringNewBlock,
+ zap.Uint32("index", b.Index),
+ )
+
+ err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
+ if err != nil {
+ s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
+ zap.String("chain", "side"),
+ zap.Uint32("block_index", b.Index))
+ }
+
+ s.tickTimers(b.Index)
+ })
+}
+
+func (s *Server) registerMainnetNewBlockEventHandler() {
+ if !s.withoutMainNet {
+ s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
+ err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
+ if err != nil {
+ s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
+ zap.String("chain", "main"),
+ zap.Uint32("block_index", b.Index))
+ }
+ })
+ }
+}
+
+func (s *Server) startRunners(errCh chan<- error) error {
+ for _, runner := range s.runners {
+ if err := runner(errCh); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *Server) launchStarters() error {
+ for _, starter := range s.starters {
+ if err := starter(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
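+// initMainNotary makes the main chain notary deposit and awaits its
+// acceptance; it is a no-op when main notary support is disabled.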
+func (s *Server) initMainNotary(ctx context.Context) error {
+ if !s.mainNotaryConfig.disabled {
+ return s.initNotary(ctx,
+ s.depositMainNotary,
+ s.awaitMainNotaryDeposit,
+ "waiting to accept main notary deposit",
+ )
+ }
+ return nil
+}
+
+func (s *Server) initSideNotary(ctx context.Context) error {
+ return s.initNotary(ctx,
+ s.depositSideNotary,
+ s.awaitSideNotaryDeposit,
+ "waiting to accept side notary deposit",
+ )
+}
+
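+// tickInitialEpoch schedules a one-shot block timer that fires the first new
+// epoch tick after initialEpochTickDelta blocks.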
+func (s *Server) tickInitialEpoch(ctx context.Context) {
+ initialEpochTicker := timer.NewOneTickTimer(
+ timer.StaticBlockMeter(s.initialEpochTickDelta),
+ func() {
+ s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
+ })
+ s.addBlockTimer(initialEpochTicker)
+}
+
func (s *Server) startWorkers(ctx context.Context) {
for _, w := range s.workers {
go w(ctx)
@@ -284,16 +299,16 @@ func (s *Server) startWorkers(ctx context.Context) {
}
// Stop closes all subscription channels.
-func (s *Server) Stop() {
- s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
+func (s *Server) Stop(ctx context.Context) {
+ s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
go s.morphListener.Stop()
go s.mainnetListener.Stop()
for _, c := range s.closers {
if err := c(); err != nil {
- s.log.Warn("closer error",
- zap.String("error", err.Error()),
+ s.log.Warn(ctx, logs.InnerringCloserError,
+ zap.Error(err),
)
}
}
@@ -319,138 +334,58 @@ func (s *Server) registerStarter(f func() error) {
}
// New creates instance of inner ring sever structure.
-func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan<- error) (*Server, error) {
+func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan<- error,
+ metrics *metrics.InnerRingServiceMetrics, cmode *atomic.Bool, audit *atomic.Bool,
+) (*Server, error) {
var err error
- server := &Server{log: log}
+ server := &Server{
+ log: log.WithTag(logger.TagIr),
+ irMetrics: metrics,
+ cmode: cmode,
+ }
- server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED)
+ server.sdNotify, err = server.initSdNotify(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED)
// parse notary support
server.feeConfig = config.NewFeeConfig(cfg)
- // prepare inner ring node private key
- acc, err := utilConfig.LoadAccount(
- cfg.GetString("wallet.path"),
- cfg.GetString("wallet.address"),
- cfg.GetString("wallet.password"))
+ err = server.initKey(cfg)
if err != nil {
- return nil, fmt.Errorf("ir: %w", err)
+ return nil, err
}
- server.key = acc.PrivateKey()
-
server.persistate, err = initPersistentStateStorage(cfg)
if err != nil {
return nil, err
}
server.registerCloser(server.persistate.Close)
- fromSideChainBlock, err := server.persistate.UInt32(persistateSideChainLastBlockKey)
- if err != nil {
- fromSideChainBlock = 0
- log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
- }
-
- morphChain := &chainParams{
- log: log,
- cfg: cfg,
- key: server.key,
- name: morphPrefix,
- from: fromSideChainBlock,
- }
-
- // create morph client
- server.morphClient, err = createClient(ctx, morphChain, errChan)
+ var morphChain *chainParams
+ morphChain, err = server.initMorph(ctx, cfg, errChan)
if err != nil {
return nil, err
}
- // create morph listener
- server.morphListener, err = createListener(ctx, server.morphClient, morphChain)
- if err != nil {
- return nil, err
- }
- if err := server.morphClient.SetGroupSignerScope(); err != nil {
- morphChain.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
- }
-
- server.withoutMainNet = cfg.GetBool("without_mainnet")
-
- if server.withoutMainNet {
- // This works as long as event Listener starts listening loop once,
- // otherwise Server.Start will run two similar routines.
- // This behavior most likely will not change.
- server.mainnetListener = server.morphListener
- server.mainnetClient = server.morphClient
- } else {
- mainnetChain := morphChain
- mainnetChain.name = mainnetPrefix
- mainnetChain.sgn = &transaction.Signer{Scopes: transaction.CalledByEntry}
-
- fromMainChainBlock, err := server.persistate.UInt32(persistateMainChainLastBlockKey)
- if err != nil {
- fromMainChainBlock = 0
- log.Warn("can't get last processed main chain block number", zap.String("error", err.Error()))
- }
- mainnetChain.from = fromMainChainBlock
-
- // create mainnet client
- server.mainnetClient, err = createClient(ctx, mainnetChain, errChan)
- if err != nil {
- return nil, err
- }
-
- // create mainnet listener
- server.mainnetListener, err = createListener(ctx, server.mainnetClient, mainnetChain)
- if err != nil {
- return nil, err
- }
- }
-
- server.mainNotaryConfig, server.sideNotaryConfig = parseNotaryConfigs(
- cfg,
- server.morphClient.ProbeNotary(),
- !server.withoutMainNet && server.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
- )
-
- log.Info("notary support",
- zap.Bool("sidechain_enabled", !server.sideNotaryConfig.disabled),
- zap.Bool("mainchain_enabled", !server.mainNotaryConfig.disabled),
- )
-
- // get all script hashes of contracts
- server.contracts, err = parseContracts(
- cfg,
- server.morphClient,
- server.withoutMainNet,
- server.mainNotaryConfig.disabled,
- server.sideNotaryConfig.disabled,
- )
+ err = server.initMainnet(ctx, cfg, morphChain, errChan)
if err != nil {
return nil, err
}
- if !server.sideNotaryConfig.disabled {
- // enable notary support in the side client
- err = server.morphClient.EnableNotarySupport(
- client.WithProxyContract(server.contracts.proxy),
- )
- if err != nil {
- return nil, fmt.Errorf("could not enable side chain notary support: %w", err)
- }
+ server.initNotaryConfig(ctx)
- server.morphListener.EnableNotarySupport(server.contracts.proxy, server.morphClient.Committee, server.morphClient)
+ err = server.initContracts(cfg)
+ if err != nil {
+ return nil, err
}
- if !server.mainNotaryConfig.disabled {
- // enable notary support in the main client
- err = server.mainnetClient.EnableNotarySupport(
- client.WithProxyContract(server.contracts.processing),
- client.WithAlphabetSource(server.morphClient.Committee),
- )
- if err != nil {
- return nil, fmt.Errorf("could not enable main chain notary support: %w", err)
- }
+ err = server.enableNotarySupport()
+ if err != nil {
+ return nil, err
}
// parse default validators
@@ -459,497 +394,35 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, fmt.Errorf("ir: can't parse predefined validators list: %w", err)
}
- server.pubKey = server.key.PublicKey().Bytes()
-
- auditPool, err := ants.NewPool(cfg.GetInt("audit.task.exec_pool_size"))
+ var morphClients *serverMorphClients
+ morphClients, err = server.initClientsFromMorph()
if err != nil {
return nil, err
}
- fee := server.feeConfig.SideChainFee()
-
- // do not use TryNotary() in audit wrapper
- // audit operations do not require multisignatures
- server.auditClient, err = auditClient.NewFromMorph(server.morphClient, server.contracts.audit, fee)
+ err = server.initProcessors(ctx, cfg, morphClients)
if err != nil {
return nil, err
}
- // form morph container client's options
- morphCnrOpts := make([]cntClient.Option, 0, 3)
- morphCnrOpts = append(morphCnrOpts,
- cntClient.TryNotary(),
- cntClient.AsAlphabet(),
- )
+ server.initTimers(ctx, cfg)
- if server.sideNotaryConfig.disabled {
- // in non-notary environments we customize fee for named container registration
- // because it takes much more additional GAS than other operations.
- morphCnrOpts = append(morphCnrOpts,
- cntClient.WithCustomFeeForNamedPut(server.feeConfig.NamedContainerRegistrationFee()),
- )
- }
-
- cnrClient, err := cntClient.NewFromMorph(server.morphClient, server.contracts.container, fee, morphCnrOpts...)
+ err = server.initGRPCServer(ctx, cfg, log, audit)
if err != nil {
return nil, err
}
- server.netmapClient, err = nmClient.NewFromMorph(server.morphClient, server.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
- if err != nil {
- return nil, err
- }
-
- server.balanceClient, err = balanceClient.NewFromMorph(server.morphClient, server.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
- if err != nil {
- return nil, err
- }
-
- repClient, err := repClient.NewFromMorph(server.morphClient, server.contracts.reputation, fee, repClient.TryNotary(), repClient.AsAlphabet())
- if err != nil {
- return nil, err
- }
-
- frostfsIDClient, err := frostfsid.NewFromMorph(server.morphClient, server.contracts.frostfsID, fee, frostfsid.TryNotary(), frostfsid.AsAlphabet())
- if err != nil {
- return nil, err
- }
-
- frostfsCli, err := frostfsClient.NewFromMorph(server.mainnetClient, server.contracts.frostfs,
- server.feeConfig.MainChainFee(), frostfsClient.TryNotary(), frostfsClient.AsAlphabet())
- if err != nil {
- return nil, err
- }
-
- // initialize morph client of Subnet contract
- clientMode := morphsubnet.NotaryAlphabet
-
- if server.sideNotaryConfig.disabled {
- clientMode = morphsubnet.NonNotary
- }
-
- subnetInitPrm := morphsubnet.InitPrm{}
- subnetInitPrm.SetBaseClient(server.morphClient)
- subnetInitPrm.SetContractAddress(server.contracts.subnet)
- subnetInitPrm.SetMode(clientMode)
-
- subnetClient := &morphsubnet.Client{}
- err = subnetClient.Init(subnetInitPrm)
- if err != nil {
- return nil, fmt.Errorf("could not initialize subnet client: %w", err)
- }
-
- var irf irFetcher
-
- if server.withoutMainNet || !server.mainNotaryConfig.disabled {
- // if mainchain is disabled we should use NeoFSAlphabetList client method according to its docs
- // (naming `...WithNotary` will not always be correct)
- irf = NewIRFetcherWithNotary(server.morphClient)
- } else {
- irf = NewIRFetcherWithoutNotary(server.netmapClient)
- }
-
- server.statusIndex = newInnerRingIndexer(
- server.morphClient,
- irf,
- server.key.PublicKey(),
- cfg.GetDuration("indexer.cache_timeout"),
- )
-
- clientCache := newClientCache(&clientCacheParams{
- Log: log,
- Key: &server.key.PrivateKey,
- SGTimeout: cfg.GetDuration("audit.timeout.get"),
- HeadTimeout: cfg.GetDuration("audit.timeout.head"),
- RangeTimeout: cfg.GetDuration("audit.timeout.rangehash"),
- AllowExternal: cfg.GetBool("audit.allow_external"),
- })
-
- server.registerNoErrCloser(clientCache.cache.CloseAll)
-
- pdpPoolSize := cfg.GetInt("audit.pdp.pairs_pool_size")
- porPoolSize := cfg.GetInt("audit.por.pool_size")
-
- // create audit processor dependencies
- auditTaskManager := audittask.New(
- audittask.WithQueueCapacity(cfg.GetUint32("audit.task.queue_capacity")),
- audittask.WithWorkerPool(auditPool),
- audittask.WithLogger(log),
- audittask.WithContainerCommunicator(clientCache),
- audittask.WithMaxPDPSleepInterval(cfg.GetDuration("audit.pdp.max_sleep_interval")),
- audittask.WithPDPWorkerPoolGenerator(func() (util2.WorkerPool, error) {
- return ants.NewPool(pdpPoolSize)
- }),
- audittask.WithPoRWorkerPoolGenerator(func() (util2.WorkerPool, error) {
- return ants.NewPool(porPoolSize)
- }),
- )
-
- server.workers = append(server.workers, auditTaskManager.Listen)
-
- // create audit processor
- auditProcessor, err := audit.New(&audit.Params{
- Log: log,
- NetmapClient: server.netmapClient,
- ContainerClient: cnrClient,
- IRList: server,
- EpochSource: server,
- SGSource: clientCache,
- Key: &server.key.PrivateKey,
- RPCSearchTimeout: cfg.GetDuration("audit.timeout.search"),
- TaskManager: auditTaskManager,
- Reporter: server,
- })
- if err != nil {
- return nil, err
- }
-
- // create settlement processor dependencies
- settlementDeps := settlementDeps{
- log: server.log,
- cnrSrc: cntClient.AsContainerSource(cnrClient),
- auditClient: server.auditClient,
- nmClient: server.netmapClient,
- clientCache: clientCache,
- balanceClient: server.balanceClient,
- }
-
- settlementDeps.settlementCtx = auditSettlementContext
- auditCalcDeps := &auditSettlementDeps{
- settlementDeps: settlementDeps,
- }
-
- settlementDeps.settlementCtx = basicIncomeSettlementContext
- basicSettlementDeps := &basicIncomeSettlementDeps{
- settlementDeps: settlementDeps,
- cnrClient: cnrClient,
- }
-
- auditSettlementCalc := auditSettlement.NewCalculator(
- &auditSettlement.CalculatorPrm{
- ResultStorage: auditCalcDeps,
- ContainerStorage: auditCalcDeps,
- PlacementCalculator: auditCalcDeps,
- SGStorage: auditCalcDeps,
- AccountStorage: auditCalcDeps,
- Exchanger: auditCalcDeps,
- AuditFeeFetcher: server.netmapClient,
- },
- auditSettlement.WithLogger(server.log),
- )
-
- // create settlement processor
- settlementProcessor := settlement.New(
- settlement.Prm{
- AuditProcessor: (*auditSettlementCalculator)(auditSettlementCalc),
- BasicIncome: &basicSettlementConstructor{dep: basicSettlementDeps},
- State: server,
- },
- settlement.WithLogger(server.log),
- )
-
- locodeValidator, err := server.newLocodeValidator(cfg)
- if err != nil {
- return nil, err
- }
-
- subnetValidator, err := subnetvalidator.New(
- subnetvalidator.Prm{
- SubnetClient: subnetClient,
- },
- )
- if err != nil {
- return nil, err
- }
-
- var alphaSync event.Handler
-
- if server.withoutMainNet || cfg.GetBool("governance.disable") {
- alphaSync = func(event.Event) {
- log.Debug("alphabet keys sync is disabled")
- }
- } else {
- // create governance processor
- governanceProcessor, err := governance.New(&governance.Params{
- Log: log,
- FrostFSClient: frostfsCli,
- NetmapClient: server.netmapClient,
- AlphabetState: server,
- EpochState: server,
- Voter: server,
- IRFetcher: irf,
- MorphClient: server.morphClient,
- MainnetClient: server.mainnetClient,
- NotaryDisabled: server.sideNotaryConfig.disabled,
- })
- if err != nil {
- return nil, err
- }
-
- alphaSync = governanceProcessor.HandleAlphabetSync
- err = bindMainnetProcessor(governanceProcessor, server)
- if err != nil {
- return nil, err
- }
- }
-
- netSettings := (*networkSettings)(server.netmapClient)
-
- var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
- netMapCandidateStateValidator.SetNetworkSettings(netSettings)
-
- // create netmap processor
- server.netmapProcessor, err = netmap.New(&netmap.Params{
- Log: log,
- PoolSize: cfg.GetInt("workers.netmap"),
- NetmapClient: server.netmapClient,
- EpochTimer: server,
- EpochState: server,
- AlphabetState: server,
- CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"),
- CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"),
- ContainerWrapper: cnrClient,
- HandleAudit: server.onlyActiveEventHandler(
- auditProcessor.StartAuditHandler(),
- ),
- NotaryDepositHandler: server.onlyAlphabetEventHandler(
- server.notaryHandler,
- ),
- AuditSettlementsHandler: server.onlyAlphabetEventHandler(
- settlementProcessor.HandleAuditEvent,
- ),
- AlphabetSyncHandler: alphaSync,
- NodeValidator: nodevalidator.New(
- &netMapCandidateStateValidator,
- addrvalidator.New(),
- locodeValidator,
- subnetValidator,
- ),
- NotaryDisabled: server.sideNotaryConfig.disabled,
- SubnetContract: &server.contracts.subnet,
-
- NodeStateSettings: netSettings,
- })
- if err != nil {
- return nil, err
- }
-
- err = bindMorphProcessor(server.netmapProcessor, server)
- if err != nil {
- return nil, err
- }
-
- // container processor
- containerProcessor, err := container.New(&container.Params{
- Log: log,
- PoolSize: cfg.GetInt("workers.container"),
- AlphabetState: server,
- ContainerClient: cnrClient,
- FrostFSIDClient: frostfsIDClient,
- NetworkState: server.netmapClient,
- NotaryDisabled: server.sideNotaryConfig.disabled,
- SubnetClient: subnetClient,
- })
- if err != nil {
- return nil, err
- }
-
- err = bindMorphProcessor(containerProcessor, server)
- if err != nil {
- return nil, err
- }
-
- // create balance processor
- balanceProcessor, err := balance.New(&balance.Params{
- Log: log,
- PoolSize: cfg.GetInt("workers.balance"),
- FrostFSClient: frostfsCli,
- BalanceSC: server.contracts.balance,
- AlphabetState: server,
- Converter: &server.precision,
- })
- if err != nil {
- return nil, err
- }
-
- err = bindMorphProcessor(balanceProcessor, server)
- if err != nil {
- return nil, err
- }
-
- if !server.withoutMainNet {
- // create mainnnet frostfs processor
- frostfsProcessor, err := frostfs.New(&frostfs.Params{
- Log: log,
- PoolSize: cfg.GetInt("workers.frostfs"),
- FrostFSContract: server.contracts.frostfs,
- FrostFSIDClient: frostfsIDClient,
- BalanceClient: server.balanceClient,
- NetmapClient: server.netmapClient,
- MorphClient: server.morphClient,
- EpochState: server,
- AlphabetState: server,
- Converter: &server.precision,
- MintEmitCacheSize: cfg.GetInt("emit.mint.cache_size"),
- MintEmitThreshold: cfg.GetUint64("emit.mint.threshold"),
- MintEmitValue: fixedn.Fixed8(cfg.GetInt64("emit.mint.value")),
- GasBalanceThreshold: cfg.GetInt64("emit.gas.balance_threshold"),
- })
- if err != nil {
- return nil, err
- }
-
- err = bindMainnetProcessor(frostfsProcessor, server)
- if err != nil {
- return nil, err
- }
- }
-
- parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
- if err != nil {
- return nil, err
- }
-
- // create alphabet processor
- alphabetProcessor, err := alphabet.New(&alphabet.Params{
- ParsedWallets: parsedWallets,
- Log: log,
- PoolSize: cfg.GetInt("workers.alphabet"),
- AlphabetContracts: server.contracts.alphabet,
- NetmapClient: server.netmapClient,
- MorphClient: server.morphClient,
- IRList: server,
- StorageEmission: cfg.GetUint64("emit.storage.amount"),
- })
- if err != nil {
- return nil, err
- }
-
- err = bindMorphProcessor(alphabetProcessor, server)
- if err != nil {
- return nil, err
- }
-
- // create reputation processor
- reputationProcessor, err := reputation.New(&reputation.Params{
- Log: log,
- PoolSize: cfg.GetInt("workers.reputation"),
- EpochState: server,
- AlphabetState: server,
- ReputationWrapper: repClient,
- ManagerBuilder: reputationcommon.NewManagerBuilder(
- reputationcommon.ManagersPrm{
- NetMapSource: server.netmapClient,
- },
- ),
- NotaryDisabled: server.sideNotaryConfig.disabled,
- })
- if err != nil {
- return nil, err
- }
-
- err = bindMorphProcessor(reputationProcessor, server)
- if err != nil {
- return nil, err
- }
-
- // initialize epoch timers
- server.epochTimer = newEpochTimer(&epochTimerArgs{
- l: server.log,
- newEpochHandlers: server.newEpochTickHandlers(),
- cnrWrapper: cnrClient,
- epoch: server,
- stopEstimationDMul: cfg.GetUint32("timers.stop_estimation.mul"),
- stopEstimationDDiv: cfg.GetUint32("timers.stop_estimation.div"),
- collectBasicIncome: subEpochEventHandler{
- handler: settlementProcessor.HandleIncomeCollectionEvent,
- durationMul: cfg.GetUint32("timers.collect_basic_income.mul"),
- durationDiv: cfg.GetUint32("timers.collect_basic_income.div"),
- },
- distributeBasicIncome: subEpochEventHandler{
- handler: settlementProcessor.HandleIncomeDistributionEvent,
- durationMul: cfg.GetUint32("timers.distribute_basic_income.mul"),
- durationDiv: cfg.GetUint32("timers.distribute_basic_income.div"),
- },
- })
-
- server.addBlockTimer(server.epochTimer)
-
- // initialize emission timer
- emissionTimer := newEmissionTimer(&emitTimerArgs{
- ap: alphabetProcessor,
- emitDuration: cfg.GetUint32("timers.emit"),
- })
-
- server.addBlockTimer(emissionTimer)
-
- controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
- if controlSvcEndpoint != "" {
- authKeysStr := cfg.GetStringSlice("control.authorized_keys")
- authKeys := make([][]byte, 0, len(authKeysStr))
-
- for i := range authKeysStr {
- key, err := hex.DecodeString(authKeysStr[i])
- if err != nil {
- return nil, fmt.Errorf("could not parse Control authorized key %s: %w",
- authKeysStr[i],
- err,
- )
- }
-
- authKeys = append(authKeys, key)
- }
-
- var p controlsrv.Prm
-
- p.SetPrivateKey(*server.key)
- p.SetHealthChecker(server)
-
- controlSvc := controlsrv.New(p,
- controlsrv.WithAllowedKeys(authKeys),
- )
-
- grpcControlSrv := grpc.NewServer()
- control.RegisterControlServiceServer(grpcControlSrv, controlSvc)
-
- server.runners = append(server.runners, func(ch chan<- error) error {
- lis, err := net.Listen("tcp", controlSvcEndpoint)
- if err != nil {
- return err
- }
-
- go func() {
- ch <- grpcControlSrv.Serve(lis)
- }()
- return nil
- })
-
- server.registerNoErrCloser(grpcControlSrv.GracefulStop)
- } else {
- log.Info("no Control server endpoint specified, service is disabled")
- }
-
- server.initSubnet(subnetConfig{
- queueSize: cfg.GetUint32("workers.subnet"),
- })
-
- if cfg.GetString("prometheus.address") != "" {
- m := metrics.NewInnerRingMetrics()
- server.metrics = &m
- }
-
return server, nil
}
-func createListener(ctx context.Context, cli *client.Client, p *chainParams) (event.Listener, error) {
- // listenerPoolCap is a capacity of a
- // worker pool inside the listener. It
- // is used to prevent blocking in neo-go:
- // the client cannot make RPC requests if
- // the notification channel is not being
- // read by another goroutine.
- const listenerPoolCap = 10
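+// initSdNotify reports whether systemd notifications are enabled in the
+// config and, if so, initializes the notification socket.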
+func (s *Server) initSdNotify(cfg *viper.Viper) (bool, error) {
+ if cfg.GetBool("systemdnotify.enabled") {
+ return true, sdnotify.InitSocket()
+ }
+ return false, nil
+}
+
+func createListener(ctx context.Context, cli *client.Client, p *chainParams) (event.Listener, error) {
var (
sub subscriber.Subscriber
err error
@@ -965,9 +438,8 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
}
listener, err := event.NewListener(event.ListenerParams{
- Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
- Subscriber: sub,
- WorkerPoolCapacity: listenerPoolCap,
+ Logger: p.log.With(zap.String("chain", p.name)),
+ Subscriber: sub,
})
if err != nil {
return nil, err
@@ -995,9 +467,20 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
priority = defaultPriority
}
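+ // Optional per-endpoint mTLS: endpoints that configure a trusted_ca_list
+ // also provide client key and certificate paths in the same config section.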
+ var mtlsConfig *client.MTLSConfig
+ rootCAs := p.cfg.GetStringSlice(fmt.Sprintf("%s.%d.trusted_ca_list", section, i))
+ if len(rootCAs) != 0 {
+ mtlsConfig = &client.MTLSConfig{
+ TrustedCAList: rootCAs,
+ KeyFile: p.cfg.GetString(fmt.Sprintf("%s.%d.key", section, i)),
+ CertFile: p.cfg.GetString(fmt.Sprintf("%s.%d.certificate", section, i)),
+ }
+ }
+
endpoints = append(endpoints, client.Endpoint{
- Address: addr,
- Priority: priority,
+ Address: addr,
+ Priority: priority,
+ MTLSConfig: mtlsConfig,
})
}
@@ -1005,9 +488,15 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
return nil, fmt.Errorf("%s chain client endpoints not provided", p.name)
}
+ nc := parseMultinetConfig(p.cfg, p.multinetMetrics)
+ ds, err := internalNet.NewDialerSource(nc)
+ if err != nil {
+ return nil, fmt.Errorf("dialer source: %w", err)
+ }
+
return client.New(
+ ctx,
p.key,
- client.WithContext(ctx),
client.WithLogger(p.log),
client.WithDialTimeout(p.cfg.GetDuration(p.name+".dial_timeout")),
client.WithSigner(p.sgn),
@@ -1016,6 +505,8 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
errChan <- fmt.Errorf("%s chain connection has been lost", p.name)
}),
client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
+ client.WithMorphCacheMetrics(p.morphCacheMetric),
+ client.WithDialerSource(ds),
)
}
@@ -1060,21 +551,43 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) {
return extraWallets, nil
}
-func (s *Server) initConfigFromBlockchain() error {
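+// parseMultinetConfig reads the optional multinet section of the config.
+// A sketch of the expected YAML layout (key names come from the code below;
+// the values are purely illustrative):
+//
+//	multinet:
+//	  enabled: true
+//	  balancer: roundrobin
+//	  restrict: false
+//	  fallback_delay: 350ms
+//	  subnets:
+//	    - mask: 192.168.219.0/24
+//	      source_ips:
+//	        - 192.168.218.185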
+func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config {
+ nc := internalNet.Config{
+ Enabled: cfg.GetBool("multinet.enabled"),
+ Balancer: cfg.GetString("multinet.balancer"),
+ Restrict: cfg.GetBool("multinet.restrict"),
+ FallbackDelay: cfg.GetDuration("multinet.fallback_delay"),
+ Metrics: m,
+ }
+ for i := 0; ; i++ {
+ mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i))
+ if mask == "" {
+ break
+ }
+ sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i))
+ nc.Subnets = append(nc.Subnets, internalNet.Subnet{
+ Prefix: mask,
+ SourceIPs: sourceIPs,
+ })
+ }
+ return nc
+}
+
+func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
// get current epoch
- epoch, err := s.netmapClient.Epoch()
+ epoch, err := s.netmapClient.Epoch(ctx)
if err != nil {
return fmt.Errorf("can't read epoch number: %w", err)
}
// get current epoch duration
- epochDuration, err := s.netmapClient.EpochDuration()
+ epochDuration, err := s.netmapClient.EpochDuration(ctx)
if err != nil {
return fmt.Errorf("can't read epoch duration: %w", err)
}
// get balance precision
- balancePrecision, err := s.balanceClient.Decimals()
+ balancePrecision, err := s.balanceClient.Decimals(ctx)
if err != nil {
return fmt.Errorf("can't read balance contract precision: %w", err)
}
@@ -1084,14 +597,14 @@ func (s *Server) initConfigFromBlockchain() error {
s.precision.SetBalancePrecision(balancePrecision)
// get next epoch delta tick
- s.initialEpochTickDelta, err = s.nextEpochBlockDelta()
+ s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx)
if err != nil {
return err
}
- s.log.Debug("read config from blockchain",
- zap.Bool("active", s.IsActive()),
- zap.Bool("alphabet", s.IsAlphabet()),
+ s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain,
+ zap.Bool("active", s.IsActive(ctx)),
+ zap.Bool("alphabet", s.IsAlphabet(ctx)),
zap.Uint64("epoch", epoch),
zap.Uint32("precision", balancePrecision),
zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta),
@@ -1100,8 +613,8 @@ func (s *Server) initConfigFromBlockchain() error {
return nil
}
-func (s *Server) nextEpochBlockDelta() (uint32, error) {
- epochBlock, err := s.netmapClient.LastEpochBlock()
+func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) {
+ epochBlock, err := s.netmapClient.LastEpochBlock(ctx)
if err != nil {
return 0, fmt.Errorf("can't read last epoch block: %w", err)
}
@@ -1119,32 +632,31 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) {
return delta - blockHeight, nil
}
-// onlyActiveHandler wrapper around event handler that executes it
-// only if inner ring node state is active.
-func (s *Server) onlyActiveEventHandler(f event.Handler) event.Handler {
- return func(ev event.Event) {
- if s.IsActive() {
- f(ev)
- }
- }
-}
-
// onlyAlphabet wrapper around event handler that executes it
// only if inner ring node is alphabet node.
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler {
- return func(ev event.Event) {
- if s.IsAlphabet() {
- f(ev)
+ return func(ctx context.Context, ev event.Event) {
+ if s.IsAlphabet(ctx) {
+ f(ctx, ev)
}
}
}
-func (s *Server) newEpochTickHandlers() []newEpochHandler {
+func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler {
newEpochHandlers := []newEpochHandler{
func() {
- s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
+ s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
},
}
return newEpochHandlers
}
+
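+// SetExtraWallets re-reads emit.extra_wallets from the config and hands the
+// parsed addresses to the alphabet processor.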
+func (s *Server) SetExtraWallets(cfg *viper.Viper) error {
+ parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
+ if err != nil {
+ return err
+ }
+ s.alphabetProcessor.SetParsedWallets(parsedWallets)
+ return nil
+}
diff --git a/pkg/innerring/internal/client/client.go b/pkg/innerring/internal/client/client.go
deleted file mode 100644
index edcbc6ae59..0000000000
--- a/pkg/innerring/internal/client/client.go
+++ /dev/null
@@ -1,344 +0,0 @@
-package frostfsapiclient
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
- "fmt"
- "io"
-
- clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/storagegroup"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// Client represents FrostFS API client cut down to the needs of a purely IR application.
-type Client struct {
- key *ecdsa.PrivateKey
-
- c clientcore.Client
-}
-
-// WrapBasicClient wraps a client.Client instance to use it for FrostFS API RPC.
-func (x *Client) WrapBasicClient(c clientcore.Client) {
- x.c = c
-}
-
-// SetPrivateKey sets a private key to sign RPC requests.
-func (x *Client) SetPrivateKey(key *ecdsa.PrivateKey) {
- x.key = key
-}
-
-// SearchSGPrm groups parameters of SearchSG operation.
-type SearchSGPrm struct {
- contextPrm
-
- cnrID cid.ID
-}
-
-// SetContainerID sets the ID of the container to search for storage groups.
-func (x *SearchSGPrm) SetContainerID(id cid.ID) {
- x.cnrID = id
-}
-
-// SearchSGRes groups the resulting values of SearchSG operation.
-type SearchSGRes struct {
- cliRes []oid.ID
-}
-
-// IDList returns a list of IDs of storage groups in the container.
-func (x SearchSGRes) IDList() []oid.ID {
- return x.cliRes
-}
-
-var sgFilter = storagegroup.SearchQuery()
-
-// SearchSG lists objects of storage group type in the container.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func (x Client) SearchSG(prm SearchSGPrm) (*SearchSGRes, error) {
- var cliPrm client.PrmObjectSearch
- cliPrm.InContainer(prm.cnrID)
- cliPrm.SetFilters(sgFilter)
- cliPrm.UseKey(*x.key)
-
- rdr, err := x.c.ObjectSearchInit(prm.ctx, cliPrm)
- if err != nil {
- return nil, fmt.Errorf("init object search: %w", err)
- }
-
- buf := make([]oid.ID, 10)
- var list []oid.ID
- var n int
- var ok bool
-
- for {
- n, ok = rdr.Read(buf)
- for i := 0; i < n; i++ {
- list = append(list, buf[i])
- }
- if !ok {
- break
- }
- }
-
- res, err := rdr.Close()
- if err == nil {
- // pull out an error from status
- err = apistatus.ErrFromStatus(res.Status())
- }
-
- if err != nil {
- return nil, fmt.Errorf("read object list: %w", err)
- }
-
- return &SearchSGRes{
- cliRes: list,
- }, nil
-}
-
-// GetObjectPrm groups parameters of GetObject operation.
-type GetObjectPrm struct {
- getObjectPrm
-}
-
-// GetObjectRes groups the resulting values of GetObject operation.
-type GetObjectRes struct {
- obj *object.Object
-}
-
-// Object returns the received object.
-func (x GetObjectRes) Object() *object.Object {
- return x.obj
-}
-
-// GetObject reads the object by address.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func (x Client) GetObject(prm GetObjectPrm) (*GetObjectRes, error) {
- var cliPrm client.PrmObjectGet
- cliPrm.FromContainer(prm.objAddr.Container())
- cliPrm.ByID(prm.objAddr.Object())
- cliPrm.UseKey(*x.key)
-
- rdr, err := x.c.ObjectGetInit(prm.ctx, cliPrm)
- if err != nil {
- return nil, fmt.Errorf("init object search: %w", err)
- }
-
- var obj object.Object
-
- if !rdr.ReadHeader(&obj) {
- res, err := rdr.Close()
- if err == nil {
- // pull out an error from status
- err = apistatus.ErrFromStatus(res.Status())
- }
-
- return nil, fmt.Errorf("read object header: %w", err)
- }
-
- buf := make([]byte, obj.PayloadSize())
-
- _, err = rdr.Read(buf)
- if err != nil && !errors.Is(err, io.EOF) {
- return nil, fmt.Errorf("read payload: %w", err)
- }
-
- obj.SetPayload(buf)
-
- return &GetObjectRes{
- obj: &obj,
- }, nil
-}
-
-// HeadObjectPrm groups parameters of HeadObject operation.
-type HeadObjectPrm struct {
- getObjectPrm
-
- raw bool
-
- local bool
-}
-
-// SetRawFlag sets flag of raw request.
-func (x *HeadObjectPrm) SetRawFlag() {
- x.raw = true
-}
-
-// SetTTL sets request TTL value.
-func (x *HeadObjectPrm) SetTTL(ttl uint32) {
- x.local = ttl < 2
-}
-
-// HeadObjectRes groups the resulting values of HeadObject operation.
-type HeadObjectRes struct {
- hdr *object.Object
-}
-
-// Header returns the received object header.
-func (x HeadObjectRes) Header() *object.Object {
- return x.hdr
-}
-
-// HeadObject reads short object header by address.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-// For raw requests, returns *object.SplitInfoError error if the requested object is virtual.
-func (x Client) HeadObject(prm HeadObjectPrm) (*HeadObjectRes, error) {
- var cliPrm client.PrmObjectHead
-
- if prm.raw {
- cliPrm.MarkRaw()
- }
-
- if prm.local {
- cliPrm.MarkLocal()
- }
-
- cliPrm.FromContainer(prm.objAddr.Container())
- cliPrm.ByID(prm.objAddr.Object())
- cliPrm.UseKey(*x.key)
-
- cliRes, err := x.c.ObjectHead(prm.ctx, cliPrm)
- if err == nil {
- // pull out an error from status
- err = apistatus.ErrFromStatus(cliRes.Status())
- }
-
- if err != nil {
- return nil, fmt.Errorf("read object header from FrostFS: %w", err)
- }
-
- var hdr object.Object
-
- if !cliRes.ReadHeader(&hdr) {
- return nil, errors.New("missing object header in the response")
- }
-
- return &HeadObjectRes{
- hdr: &hdr,
- }, nil
-}
-
-// GetObjectPayload reads an object by address from FrostFS via Client and returns its payload.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func GetObjectPayload(ctx context.Context, c Client, addr oid.Address) ([]byte, error) {
- var prm GetObjectPrm
-
- prm.SetContext(ctx)
- prm.SetAddress(addr)
-
- obj, err := c.GetObject(prm)
- if err != nil {
- return nil, err
- }
-
- return obj.Object().Payload(), nil
-}
-
-func headObject(ctx context.Context, c Client, addr oid.Address, raw bool, ttl uint32) (*object.Object, error) {
- var prm HeadObjectPrm
-
- prm.SetContext(ctx)
- prm.SetAddress(addr)
- prm.SetTTL(ttl)
-
- if raw {
- prm.SetRawFlag()
- }
-
- obj, err := c.HeadObject(prm)
- if err != nil {
- return nil, err
- }
-
- return obj.Header(), nil
-}
-
-// GetRawObjectHeaderLocally reads the raw short object header from the server's local storage by address via Client.
-func GetRawObjectHeaderLocally(ctx context.Context, c Client, addr oid.Address) (*object.Object, error) {
- return headObject(ctx, c, addr, true, 1)
-}
-
-// GetObjectHeaderFromContainer reads the short object header by address via Client with TTL = 10
-// for deep traversal of the container.
-func GetObjectHeaderFromContainer(ctx context.Context, c Client, addr oid.Address) (*object.Object, error) {
- return headObject(ctx, c, addr, false, 10)
-}
-
-// HashPayloadRangePrm groups parameters of HashPayloadRange operation.
-type HashPayloadRangePrm struct {
- getObjectPrm
-
- rng *object.Range
-}
-
-// SetRange sets payload range to calculate the hash.
-func (x *HashPayloadRangePrm) SetRange(rng *object.Range) {
- x.rng = rng
-}
-
-// HashPayloadRangeRes groups the resulting values of HashPayloadRange operation.
-type HashPayloadRangeRes struct {
- h []byte
-}
-
-// Hash returns the hash of the object payload range.
-func (x HashPayloadRangeRes) Hash() []byte {
- return x.h
-}
-
-// HashPayloadRange requests to calculate Tillich-Zemor hash of the payload range of the object
-// from the remote server's local storage.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func (x Client) HashPayloadRange(prm HashPayloadRangePrm) (res HashPayloadRangeRes, err error) {
- var cliPrm client.PrmObjectHash
- cliPrm.FromContainer(prm.objAddr.Container())
- cliPrm.ByID(prm.objAddr.Object())
- cliPrm.SetRangeList(prm.rng.GetOffset(), prm.rng.GetLength())
- cliPrm.TillichZemorAlgo()
-
- cliRes, err := x.c.ObjectHash(prm.ctx, cliPrm)
- if err == nil {
- // pull out an error from status
- err = apistatus.ErrFromStatus(cliRes.Status())
- if err != nil {
- return
- }
-
- hs := cliRes.Checksums()
- if ln := len(hs); ln != 1 {
- err = fmt.Errorf("wrong number of checksums %d", ln)
- } else {
- res.h = hs[0]
- }
- }
-
- return
-}
-
-// HashObjectRange reads Tillich-Zemor hash of the object payload range by address
-// from the remote server's local storage via Client.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func HashObjectRange(ctx context.Context, c Client, addr oid.Address, rng *object.Range) ([]byte, error) {
- var prm HashPayloadRangePrm
-
- prm.SetContext(ctx)
- prm.SetAddress(addr)
- prm.SetRange(rng)
-
- res, err := c.HashPayloadRange(prm)
- if err != nil {
- return nil, err
- }
-
- return res.Hash(), nil
-}
diff --git a/pkg/innerring/internal/client/doc.go b/pkg/innerring/internal/client/doc.go
deleted file mode 100644
index a04b0627b3..0000000000
--- a/pkg/innerring/internal/client/doc.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Package frostfsapiclient provides functionality for IR application communication with FrostFS network.
-//
-// The basic client for accessing remote nodes via FrostFS API is a FrostFS SDK Go API client.
-// However, although it encapsulates a useful piece of business logic (e.g. the signature mechanism),
-// the IR application does not fully use the client's flexible interface.
-//
-// In this regard, this package represents an abstraction -- a type-wrapper over the base client.
-// The type provides the minimum interface necessary for the application and also allows you to concentrate
-// the entire spectrum of the client's use in one place (this will be convenient both when updating the base client
-// and for evaluating the UX of SDK library). So, it is expected that all application packages will be limited
-// to this package for the development of functionality requiring FrostFS API communication.
-package frostfsapiclient
diff --git a/pkg/innerring/internal/client/prm.go b/pkg/innerring/internal/client/prm.go
deleted file mode 100644
index 3c749fe612..0000000000
--- a/pkg/innerring/internal/client/prm.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package frostfsapiclient
-
-import (
- "context"
-
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type contextPrm struct {
- ctx context.Context
-}
-
-// SetContext sets context.Context used for network communication.
-func (x *contextPrm) SetContext(ctx context.Context) {
- x.ctx = ctx
-}
-
-type objectAddressPrm struct {
- objAddr oid.Address
-}
-
-// SetAddress sets address of the object.
-func (x *objectAddressPrm) SetAddress(addr oid.Address) {
- x.objAddr = addr
-}
-
-type getObjectPrm struct {
- contextPrm
- objectAddressPrm
-}
diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go
index a9a9498b6e..ae4c851681 100644
--- a/pkg/innerring/locode.go
+++ b/pkg/innerring/locode.go
@@ -1,15 +1,15 @@
package innerring
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
irlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
"github.com/spf13/viper"
)
-func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
+func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
locodeDB := locodebolt.New(locodebolt.Prm{
Path: cfg.GetString("locode.db.path"),
},
@@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, err
return irlocode.New(irlocode.Prm{
DB: (*locodeBoltDBWrapper)(locodeDB),
- }), nil
+ })
}
type locodeBoltEntryWrapper struct {
diff --git a/pkg/innerring/metrics/metrics.go b/pkg/innerring/metrics/metrics.go
new file mode 100644
index 0000000000..002f3afe10
--- /dev/null
+++ b/pkg/innerring/metrics/metrics.go
@@ -0,0 +1,15 @@
+package metrics
+
+import "time"
+
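+// Register is the metrics sink of the inner ring: it records the current
+// epoch, the health status and per-event handling timings.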
+type Register interface {
+ SetEpoch(epoch uint64)
+ SetHealth(s int32)
+ AddEvent(d time.Duration, typ string, success bool)
+}
+
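+// DefaultRegister is a no-op Register implementation.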
+type DefaultRegister struct{}
+
+func (DefaultRegister) SetEpoch(uint64) {}
+func (DefaultRegister) SetHealth(int32) {}
+func (DefaultRegister) AddEvent(time.Duration, string, bool) {}
diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go
index 9961710caf..fb11e94269 100644
--- a/pkg/innerring/netmap.go
+++ b/pkg/innerring/netmap.go
@@ -1,6 +1,7 @@
package innerring
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -17,8 +18,8 @@ type networkSettings netmapclient.Client
// MaintenanceModeAllowed requests network configuration from the Sidechain
// and check allowance of storage node's maintenance mode according to it.
// Always returns state.ErrMaintenanceModeDisallowed.
-func (s *networkSettings) MaintenanceModeAllowed() error {
- allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed()
+func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error {
+ allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx)
if err != nil {
return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
} else if allowed {
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index 8680066e95..c8a69962f9 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -28,40 +28,39 @@ const (
gasDivisor = 2
)
-func (s *Server) depositMainNotary() (tx util.Uint256, err error) {
+func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
}
return s.mainnetClient.DepositNotary(
+ ctx,
depositAmount,
uint32(s.epochDuration.Load())+notaryExtraBlocks,
)
}
-func (s *Server) depositSideNotary() (tx util.Uint256, err error) {
+func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
}
- return s.morphClient.DepositEndlessNotary(depositAmount)
+ tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
+ return tx, err
}
-func (s *Server) notaryHandler(_ event.Event) {
+func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
if !s.mainNotaryConfig.disabled {
- _, err := s.depositMainNotary()
+ _, err := s.depositMainNotary(ctx)
if err != nil {
- s.log.Error("can't make notary deposit in main chain", zap.Error(err))
+ s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
}
}
- if !s.sideNotaryConfig.disabled {
- _, err := s.depositSideNotary()
- if err != nil {
- s.log.Error("can't make notary deposit in side chain", zap.Error(err))
- }
+ if _, err := s.depositSideNotary(ctx); err != nil {
+ s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
}
}
@@ -74,21 +73,29 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er
}
func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
- tx, err := deposit()
+ tx, err := deposit(ctx)
if err != nil {
return err
}
- s.log.Info(msg)
+ if tx.Equals(util.Uint256{}) {
+ // a successful deposit call that returns an empty TX hash means
+ // the deposit has already been made; there is no need to wait for it.
+ s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
+ return nil
+ }
+
+ s.log.Info(ctx, msg)
return await(ctx, tx)
}
func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error {
- for i := 0; i < notaryDepositTimeout; i++ {
+ for range notaryDepositTimeout {
select {
case <-ctx.Done():
- return nil
+ return ctx.Err()
default:
}
@@ -107,16 +114,8 @@ func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash
return errDepositTimeout
}
-func parseNotaryConfigs(cfg *viper.Viper, withSideNotary, withMainNotary bool) (main, side *notaryConfig) {
+func notaryConfigs(withMainNotary bool) (main *notaryConfig) {
main = new(notaryConfig)
- side = new(notaryConfig)
-
- if !withSideNotary {
- main.disabled = true
- side.disabled = true
-
- return
- }
main.disabled = !withMainNotary
diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go
index 9d61aa8123..d6b6532827 100644
--- a/pkg/innerring/processors/alphabet/handlers.go
+++ b/pkg/innerring/processors/alphabet/handlers.go
@@ -1,21 +1,25 @@
package alphabet
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"go.uber.org/zap"
)
-func (ap *Processor) HandleGasEmission(ev event.Event) {
+func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
- ap.log.Info("tick", zap.String("type", "alphabet gas emit"))
+ ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
- err := ap.pool.Submit(func() { ap.processEmit() })
+ err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
if err != nil {
// there system can be moved into controlled degradation stage
- ap.log.Warn("alphabet processor worker pool drained",
+ ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
zap.Int("capacity", ap.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
new file mode 100644
index 0000000000..1da3c401de
--- /dev/null
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -0,0 +1,284 @@
+package alphabet_test
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
+ t.Parallel()
+ var emission uint64 = 100_000
+ var index int = 5
+ var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}}
+
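+ // register contracts for glagolitic letters 0..index so the processor
+ // can resolve its own alphabet contract by index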
+ alphabetContracts := innerring.NewAlphabetContracts()
+ for i := range index + 1 {
+ alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
+ }
+
+ morphClient := &testMorphClient{}
+
+ var node1 netmap.NodeInfo
+ key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35")
+ require.NoError(t, err, "failed to parse key1")
+ node1.SetPublicKey(key1.Bytes())
+
+ var node2 netmap.NodeInfo
+ key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3")
+ require.NoError(t, err, "failed to parse key2")
+ node2.SetPublicKey(key2.Bytes())
+
+ nodes := []netmap.NodeInfo{node1, node2}
+
+ network := &netmap.NetMap{}
+ network.SetNodes(nodes)
+
+ netmapClient := &testNetmapClient{
+ netmap: network,
+ }
+
+ params := &alphabet.Params{
+ ParsedWallets: parsedWallets,
+ Log: test.NewLogger(t),
+ PoolSize: 2,
+ StorageEmission: emission,
+ IRList: &testIndexer{index: index},
+ AlphabetContracts: alphabetContracts,
+ MorphClient: morphClient,
+ NetmapClient: netmapClient,
+ }
+
+ processor, err := alphabet.New(params)
+ require.NoError(t, err, "failed to create processor instance")
+
+ processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+
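+ // the handler only schedules work on the pool; wait for the pool to
+ // drain before asserting on the recorded mock calls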
+ processor.WaitPoolRunning()
+
+ require.EqualValues(t, []invokedMethod{
+ {
+ contract: alphabetContracts[innerring.GlagoliticLetter(index)],
+ fee: 0,
+ method: "emit",
+ },
+ }, morphClient.invokedMethods, "invalid invoked morph methods")
+
+ require.EqualValues(t, []transferGas{
+ {
+ receiver: key1.GetScriptHash(),
+ amount: fixedn.Fixed8(25_000),
+ },
+ {
+ receiver: key2.GetScriptHash(),
+ amount: fixedn.Fixed8(25_000),
+ },
+ }, morphClient.transferredGas, "invalid transferred Gas")
+
+ require.EqualValues(t, []batchTransferGas{
+ {
+ receivers: parsedWallets,
+ amount: fixedn.Fixed8(25_000),
+ },
+ }, morphClient.batchTransferredGas, "invalid batch transferred Gas")
+}
+
+func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
+ t.Parallel()
+ var emission uint64 = 100_000
+ var index int = 5
+ var parsedWallets []util.Uint160
+
+ alphabetContracts := innerring.NewAlphabetContracts()
+ for i := range index + 1 {
+ alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
+ }
+
+ morphClient := &testMorphClient{}
+
+ var node1 netmap.NodeInfo
+ key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35")
+ require.NoError(t, err, "failed to parse key1")
+ node1.SetPublicKey(key1.Bytes())
+
+ var node2 netmap.NodeInfo
+ key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3")
+ require.NoError(t, err, "failed to parse key2")
+ node2.SetPublicKey(key2.Bytes())
+
+ nodes := []netmap.NodeInfo{node1, node2}
+
+ network := &netmap.NetMap{}
+ network.SetNodes(nodes)
+
+ netmapClient := &testNetmapClient{
+ netmap: network,
+ }
+
+ params := &alphabet.Params{
+ ParsedWallets: parsedWallets,
+ Log: test.NewLogger(t),
+ PoolSize: 2,
+ StorageEmission: emission,
+ IRList: &testIndexer{index: index},
+ AlphabetContracts: alphabetContracts,
+ MorphClient: morphClient,
+ NetmapClient: netmapClient,
+ }
+
+ processor, err := alphabet.New(params)
+ require.NoError(t, err, "failed to create processor instance")
+
+ processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+
+ processor.WaitPoolRunning()
+
+ require.EqualValues(t, []invokedMethod{
+ {
+ contract: alphabetContracts[innerring.GlagoliticLetter(index)],
+ fee: 0,
+ method: "emit",
+ },
+ }, morphClient.invokedMethods, "invalid invoked morph methods")
+
+ require.EqualValues(t, []transferGas{
+ {
+ receiver: key1.GetScriptHash(),
+ amount: fixedn.Fixed8(50_000),
+ },
+ {
+ receiver: key2.GetScriptHash(),
+ amount: fixedn.Fixed8(50_000),
+ },
+ }, morphClient.transferredGas, "invalid transferred Gas")
+
+ require.Equal(t, 0, len(morphClient.batchTransferredGas), "invalid batch transferred Gas")
+}
+
+func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
+ t.Parallel()
+ var emission uint64 = 100_000
+ var index int = 5
+ var parsedWallets []util.Uint160
+
+ alphabetContracts := innerring.NewAlphabetContracts()
+ for i := range index + 1 {
+ alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
+ }
+
+ morphClient := &testMorphClient{}
+
+ var nodes []netmap.NodeInfo
+ network := &netmap.NetMap{}
+ network.SetNodes(nodes)
+
+ netmapClient := &testNetmapClient{
+ netmap: network,
+ }
+
+ params := &alphabet.Params{
+ ParsedWallets: parsedWallets,
+ Log: test.NewLogger(t),
+ PoolSize: 2,
+ StorageEmission: emission,
+ IRList: &testIndexer{index: index},
+ AlphabetContracts: alphabetContracts,
+ MorphClient: morphClient,
+ NetmapClient: netmapClient,
+ }
+
+ processor, err := alphabet.New(params)
+ require.NoError(t, err, "failed to create processor instance")
+
+ processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+
+ processor.WaitPoolRunning()
+
+ require.EqualValues(t, []invokedMethod{
+ {
+ contract: alphabetContracts[innerring.GlagoliticLetter(index)],
+ fee: 0,
+ method: "emit",
+ },
+ }, morphClient.invokedMethods, "invalid invoked morph methods")
+
+ require.Equal(t, 0, len(morphClient.transferredGas), "invalid transferred Gas")
+
+ require.Equal(t, 0, len(morphClient.batchTransferredGas), "invalid batch transferred Gas")
+}
+
+type testIndexer struct {
+ index int
+}
+
+func (i *testIndexer) AlphabetIndex(context.Context) int {
+ return i.index
+}
+
+type invokedMethod struct {
+ contract util.Uint160
+ fee fixedn.Fixed8
+ method string
+ args []any
+}
+
+type transferGas struct {
+ receiver util.Uint160
+ amount fixedn.Fixed8
+}
+
+type batchTransferGas struct {
+ receivers []util.Uint160
+ amount fixedn.Fixed8
+}
+
+type testMorphClient struct {
+ invokedMethods []invokedMethod
+ transferredGas []transferGas
+ batchTransferredGas []batchTransferGas
+}
+
+func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) {
+ c.invokedMethods = append(c.invokedMethods,
+ invokedMethod{
+ contract: contract,
+ fee: fee,
+ method: method,
+ args: args,
+ })
+ return client.InvokeRes{}, nil
+}
+
+func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
+ c.transferredGas = append(c.transferredGas, transferGas{
+ receiver: receiver,
+ amount: amount,
+ })
+ return nil
+}
+
+func (c *testMorphClient) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error {
+ c.batchTransferredGas = append(c.batchTransferredGas, batchTransferGas{
+ receivers: receivers,
+ amount: amount,
+ })
+ return nil
+}
+
+type testNetmapClient struct {
+ netmap *netmap.NetMap
+}
+
+func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+ return c.netmap, nil
+}
diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go
index 353adb455c..d3d0f83f2c 100644
--- a/pkg/innerring/processors/alphabet/process_emit.go
+++ b/pkg/innerring/processors/alphabet/process_emit.go
@@ -1,95 +1,117 @@
package alphabet
import (
+ "context"
"crypto/elliptic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
const emitMethod = "emit"
-func (ap *Processor) processEmit() {
- index := ap.irList.AlphabetIndex()
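+// processEmit performs one gas emission round; the returned flag is the
+// success value handed back to the event-submission wrapper.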
+func (ap *Processor) processEmit(ctx context.Context) bool {
+ index := ap.irList.AlphabetIndex(ctx)
if index < 0 {
- ap.log.Info("non alphabet mode, ignore gas emission event")
+ ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
- return
+ return true
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
- ap.log.Debug("node is out of alphabet range, ignore gas emission event",
+ ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
zap.Int("index", index))
- return
+ return false
}
// there is no signature collection, so no extra fee is needed
- err := ap.morphClient.Invoke(contract, 0, emitMethod)
+ _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil {
- ap.log.Warn("can't invoke alphabet emit method", zap.String("error", err.Error()))
+ ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
- return
+ return false
}
if ap.storageEmission == 0 {
- ap.log.Info("storage node emission is off")
+ ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
- return
+ return true
}
- networkMap, err := ap.netmapClient.NetMap()
+ networkMap, err := ap.netmapClient.NetMap(ctx)
if err != nil {
- ap.log.Warn("can't get netmap snapshot to emit gas to storage nodes",
- zap.String("error", err.Error()))
+ ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
+ zap.Error(err))
- return
+ return false
}
nmNodes := networkMap.Nodes()
+ nmLen := len(nmNodes)
+ ap.pwLock.RLock()
+ pw := ap.parsedWallets
+ ap.pwLock.RUnlock()
+ extraLen := len(pw)
- ln := len(nmNodes)
- if ln == 0 {
- ap.log.Debug("empty network map, do not emit gas")
+ ap.log.Debug(ctx, logs.AlphabetGasEmission,
+ zap.Int("network_map", nmLen),
+ zap.Int("extra_wallets", extraLen))
- return
+ if nmLen+extraLen == 0 {
+ return true
}
- gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(ln+len(ap.parsedWallets)))
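+ // split the emission evenly between netmap nodes and extra wallets;
+ // integer division may leave a small remainder undistributed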
+ gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
+ ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
+
+ ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
+
+ return true
+}
+
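+// transferGasToNetmapNodes sends gasPerNode to each netmap node
+// individually; nodes with malformed keys are skipped and failed
+// transfers are logged but not retried.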
+func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
for i := range nmNodes {
keyBytes := nmNodes[i].PublicKey()
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
- ap.log.Warn("can't parse node public key",
- zap.String("error", err.Error()))
+ ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
+ zap.Error(err))
continue
}
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
- ap.log.Warn("can't transfer gas",
+ ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
}
+}
- err = ap.morphClient.BatchTransferGas(ap.parsedWallets, gasPerNode)
- if err != nil {
- receiversLog := make([]string, len(ap.parsedWallets))
- for i, addr := range ap.parsedWallets {
- receiversLog[i] = addr.StringLE()
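+// transferGasToExtraNodes batch-transfers gasPerNode to the extra
+// wallets; on failure the full receiver list is logged.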
+func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
+ if len(pw) > 0 {
+ err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
+ if err != nil {
+ receiversLog := make([]string, len(pw))
+ for i, addr := range pw {
+ receiversLog[i] = addr.StringLE()
+ }
+ ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
+ zap.Strings("receivers", receiversLog),
+ zap.Int64("amount", int64(gasPerNode)),
+ zap.Error(err),
+ )
}
- ap.log.Warn("can't transfer gas to wallet",
- zap.Strings("receivers", receiversLog),
- zap.Int64("amount", int64(gasPerNode)),
- zap.String("error", err.Error()),
- )
}
}
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
index 9801581329..0aea740031 100644
--- a/pkg/innerring/processors/alphabet/processor.go
+++ b/pkg/innerring/processors/alphabet/processor.go
@@ -1,22 +1,26 @@
package alphabet
import (
+ "context"
"errors"
"fmt"
+ "sync"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
// Indexer is a callback interface for inner ring global state.
Indexer interface {
- AlphabetIndex() int
+ AlphabetIndex(context.Context) int
}
// Contracts is an interface of the storage
@@ -31,14 +35,27 @@ type (
GetByIndex(int) (util.Uint160, bool)
}
+ netmapClient interface {
+ NetMap(ctx context.Context) (*netmap.NetMap, error)
+ }
+
+ morphClient interface {
+ Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error)
+ TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
+ BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
+ }
+
// Processor of events produced for alphabet contracts in the sidechain.
Processor struct {
- parsedWallets []util.Uint160
+ parsedWallets []util.Uint160
+ // protects parsedWallets from concurrent change
+ pwLock sync.RWMutex
log *logger.Logger
+ metrics metrics.Register
pool *ants.Pool
alphabetContracts Contracts
- netmapClient *nmClient.Client
- morphClient *client.Client
+ netmapClient netmapClient
+ morphClient morphClient
irList Indexer
storageEmission uint64
}
@@ -47,10 +64,11 @@ type (
Params struct {
ParsedWallets []util.Uint160
Log *logger.Logger
+ Metrics metrics.Register
PoolSize int
AlphabetContracts Contracts
- NetmapClient *nmClient.Client
- MorphClient *client.Client
+ NetmapClient netmapClient
+ MorphClient morphClient
IRList Indexer
StorageEmission uint64
}
@@ -67,16 +85,20 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
- p.Log.Debug("alphabet worker pool", zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
}
+ metricsRegister := p.Metrics
+ if metricsRegister == nil {
+ metricsRegister = metrics.DefaultRegister{}
+ }
+
return &Processor{
parsedWallets: p.ParsedWallets,
log: p.Log,
+ metrics: metricsRegister,
pool: pool,
alphabetContracts: p.AlphabetContracts,
netmapClient: p.NetmapClient,
@@ -86,9 +108,10 @@ func New(p *Params) (*Processor, error) {
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- return nil
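+// SetParsedWallets replaces the list of extra gas receivers under pwLock,
+// making it safe to call concurrently with gas emission.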
+func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
+ ap.pwLock.Lock()
+ ap.parsedWallets = parsedWallets
+ ap.pwLock.Unlock()
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
@@ -106,7 +129,10 @@ func (ap *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-// TimersHandlers for the 'Timers' event producer.
-func (ap *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
+// WaitPoolRunning waits until the pool has no running tasks.
+// For use in tests only.
+func (ap *Processor) WaitPoolRunning() {
+ for ap.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
}
diff --git a/pkg/innerring/processors/audit/events.go b/pkg/innerring/processors/audit/events.go
deleted file mode 100644
index 4fb106122c..0000000000
--- a/pkg/innerring/processors/audit/events.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package audit
-
-// Start is an event to start a new round of data audit.
-type Start struct {
- epoch uint64
-}
-
-// MorphEvent implements the Event interface.
-func (a Start) MorphEvent() {}
-
-func NewAuditStartEvent(epoch uint64) Start {
- return Start{
- epoch: epoch,
- }
-}
-
-func (a Start) Epoch() uint64 {
- return a.epoch
-}
diff --git a/pkg/innerring/processors/audit/handlers.go b/pkg/innerring/processors/audit/handlers.go
deleted file mode 100644
index 8b2354bb87..0000000000
--- a/pkg/innerring/processors/audit/handlers.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package audit
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "go.uber.org/zap"
-)
-
-func (ap *Processor) handleNewAuditRound(ev event.Event) {
- auditEvent := ev.(Start)
-
- epoch := auditEvent.Epoch()
-
- ap.log.Info("new round of audit", zap.Uint64("epoch", epoch))
-
- // send an event to the worker pool
-
- err := ap.pool.Submit(func() { ap.processStartAudit(epoch) })
- if err != nil {
- ap.log.Warn("previous round of audit prepare hasn't finished yet")
- }
-}
diff --git a/pkg/innerring/processors/audit/process.go b/pkg/innerring/processors/audit/process.go
deleted file mode 100644
index 7feecb7b60..0000000000
--- a/pkg/innerring/processors/audit/process.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package audit
-
-import (
- "context"
- "crypto/sha256"
-
- clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-func (ap *Processor) processStartAudit(epoch uint64) {
- log := ap.log.With(zap.Uint64("epoch", epoch))
-
- ap.prevAuditCanceler()
-
- skipped := ap.taskManager.Reset()
- if skipped > 0 {
- ap.log.Info("some tasks from previous epoch are skipped",
- zap.Int("amount", skipped),
- )
- }
-
- containers, err := ap.selectContainersToAudit(epoch)
- if err != nil {
- log.Error("container selection failure", zap.String("error", err.Error()))
-
- return
- }
-
- log.Info("select containers for audit", zap.Int("amount", len(containers)))
-
- nm, err := ap.netmapClient.GetNetMap(0)
- if err != nil {
- ap.log.Error("can't fetch network map",
- zap.String("error", err.Error()))
-
- return
- }
-
- var auditCtx context.Context
- auditCtx, ap.prevAuditCanceler = context.WithCancel(context.Background())
-
- pivot := make([]byte, sha256.Size)
-
- for i := range containers {
- cnr, err := cntClient.Get(ap.containerClient, containers[i]) // get container structure
- if err != nil {
- log.Error("can't get container info, ignore",
- zap.Stringer("cid", containers[i]),
- zap.String("error", err.Error()))
-
- continue
- }
-
- containers[i].Encode(pivot)
-
- // find all container nodes for current epoch
- nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), pivot)
- if err != nil {
- log.Info("can't build placement for container, ignore",
- zap.Stringer("cid", containers[i]),
- zap.String("error", err.Error()))
-
- continue
- }
-
- n := placement.FlattenNodes(nodes)
-
- // shuffle nodes to ask a random one
- rand.Shuffle(len(n), func(i, j int) {
- n[i], n[j] = n[j], n[i]
- })
-
- // search storage groups
- storageGroupsIDs := ap.findStorageGroups(containers[i], n)
- log.Info("select storage groups for audit",
- zap.Stringer("cid", containers[i]),
- zap.Int("amount", len(storageGroupsIDs)))
-
- // filter expired storage groups
- storageGroups := ap.filterExpiredSG(containers[i], storageGroupsIDs, nodes, *nm)
- log.Info("filter expired storage groups for audit",
- zap.Stringer("cid", containers[i]),
- zap.Int("amount", len(storageGroups)))
-
- // skip audit for containers without
- // non-expired storage groups
- if len(storageGroupsIDs) == 0 {
- continue
- }
-
- auditTask := new(audit.Task).
- WithReporter(&epochAuditReporter{
- epoch: epoch,
- rep: ap.reporter,
- }).
- WithAuditContext(auditCtx).
- WithContainerID(containers[i]).
- WithStorageGroupList(storageGroups).
- WithContainerStructure(cnr.Value).
- WithContainerNodes(nodes).
- WithNetworkMap(nm)
-
- if err := ap.taskManager.PushTask(auditTask); err != nil {
- ap.log.Error("could not push audit task",
- zap.String("error", err.Error()),
- )
- }
- }
-}
-
-func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []oid.ID {
- var sg []oid.ID
-
- ln := len(shuffled)
-
- var (
- info clientcore.NodeInfo
- prm storagegroup.SearchSGPrm
- )
-
- prm.Container = cnr
-
- for i := range shuffled { // consider iterating over some part of container
- log := ap.log.With(
- zap.Stringer("cid", cnr),
- zap.String("key", netmap.StringifyPublicKey(shuffled[0])),
- zap.Int("try", i),
- zap.Int("total_tries", ln),
- )
-
- err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i]))
- if err != nil {
- log.Warn("parse client node info", zap.String("error", err.Error()))
-
- continue
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), ap.searchTimeout)
-
- prm.Context = ctx
- prm.NodeInfo = info
-
- var dst storagegroup.SearchSGDst
-
- err = ap.sgSrc.ListSG(&dst, prm)
-
- cancel()
-
- if err != nil {
- log.Warn("error in storage group search", zap.String("error", err.Error()))
- continue
- }
-
- sg = append(sg, dst.Objects...)
-
- break // we found storage groups, so break loop
- }
-
- return sg
-}
-
-func (ap *Processor) filterExpiredSG(cid cid.ID, sgIDs []oid.ID,
- cnr [][]netmap.NodeInfo, nm netmap.NetMap) []storagegroup.StorageGroup {
- sgs := make([]storagegroup.StorageGroup, 0, len(sgIDs))
- var coreSG storagegroup.StorageGroup
-
- var getSGPrm storagegroup.GetSGPrm
- getSGPrm.CID = cid
- getSGPrm.Container = cnr
- getSGPrm.NetMap = nm
-
- for _, sgID := range sgIDs {
- ctx, cancel := context.WithTimeout(context.Background(), ap.searchTimeout)
-
- getSGPrm.OID = sgID
- getSGPrm.Context = ctx
-
- sg, err := ap.sgSrc.GetSG(getSGPrm)
-
- cancel()
-
- if err != nil {
- ap.log.Error(
- "could not get storage group object for audit, skipping",
- zap.Stringer("cid", cid),
- zap.Stringer("oid", sgID),
- zap.Error(err),
- )
- continue
- }
-
- // filter expired epochs
- if sg.ExpirationEpoch() >= ap.epochSrc.EpochCounter() {
- coreSG.SetID(sgID)
- coreSG.SetStorageGroup(*sg)
-
- sgs = append(sgs, coreSG)
- }
- }
-
- return sgs
-}
diff --git a/pkg/innerring/processors/audit/processor.go b/pkg/innerring/processors/audit/processor.go
deleted file mode 100644
index cb514b1655..0000000000
--- a/pkg/innerring/processors/audit/processor.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package audit
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
- "fmt"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/panjf2000/ants/v2"
-)
-
-type (
- // Indexer is a callback interface for inner ring global state.
- Indexer interface {
- InnerRingIndex() int
- InnerRingSize() int
- }
-
- TaskManager interface {
- PushTask(*audit.Task) error
-
- // Must skip all tasks planned for execution and
- // return their number.
- Reset() int
- }
-
- // EpochSource is an interface that provides actual
- // epoch information.
- EpochSource interface {
- // EpochCounter must return current epoch number.
- EpochCounter() uint64
- }
-
- // Processor of events related to data audit.
- Processor struct {
- log *logger.Logger
- pool *ants.Pool
- irList Indexer
- sgSrc storagegroup.SGSource
- epochSrc EpochSource
- searchTimeout time.Duration
-
- containerClient *cntClient.Client
- netmapClient *nmClient.Client
-
- taskManager TaskManager
- reporter audit.Reporter
- prevAuditCanceler context.CancelFunc
- }
-
- // Params of the processor constructor.
- Params struct {
- Log *logger.Logger
- NetmapClient *nmClient.Client
- ContainerClient *cntClient.Client
- IRList Indexer
- SGSource storagegroup.SGSource
- RPCSearchTimeout time.Duration
- TaskManager TaskManager
- Reporter audit.Reporter
- Key *ecdsa.PrivateKey
- EpochSource EpochSource
- }
-)
-
-type epochAuditReporter struct {
- epoch uint64
-
- rep audit.Reporter
-}
-
-// ProcessorPoolSize limits pool size for audit Processor. Processor manages
-// audit tasks and fills queue for the next epoch. This process must not be interrupted
-// by a new audit epoch, so we limit the pool size for the processor to one.
-const ProcessorPoolSize = 1
-
-// New creates audit processor instance.
-func New(p *Params) (*Processor, error) {
- switch {
- case p.Log == nil:
- return nil, errors.New("ir/audit: logger is not set")
- case p.IRList == nil:
- return nil, errors.New("ir/audit: global state is not set")
- case p.SGSource == nil:
- return nil, errors.New("ir/audit: SG source is not set")
- case p.TaskManager == nil:
- return nil, errors.New("ir/audit: audit task manager is not set")
- case p.Reporter == nil:
- return nil, errors.New("ir/audit: audit result reporter is not set")
- case p.Key == nil:
- return nil, errors.New("ir/audit: signing key is not set")
- case p.EpochSource == nil:
- return nil, errors.New("ir/audit: epoch source is not set")
- }
-
- pool, err := ants.NewPool(ProcessorPoolSize, ants.WithNonblocking(true))
- if err != nil {
- return nil, fmt.Errorf("ir/audit: can't create worker pool: %w", err)
- }
-
- return &Processor{
- log: p.Log,
- pool: pool,
- containerClient: p.ContainerClient,
- irList: p.IRList,
- sgSrc: p.SGSource,
- epochSrc: p.EpochSource,
- searchTimeout: p.RPCSearchTimeout,
- netmapClient: p.NetmapClient,
- taskManager: p.TaskManager,
- reporter: p.Reporter,
- prevAuditCanceler: func() {},
- }, nil
-}
-
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- return nil
-}
-
-// ListenerNotificationHandlers for the 'event.Listener' event producer.
-func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return nil
-}
-
-// TimersHandlers for the 'Timers' event producer.
-func (ap *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
-
-// StartAuditHandler for the internal event producer.
-func (ap *Processor) StartAuditHandler() event.Handler {
- return ap.handleNewAuditRound
-}
-
-func (r *epochAuditReporter) WriteReport(rep *audit.Report) error {
- res := rep.Result()
- res.ForEpoch(r.epoch)
-
- return r.rep.WriteReport(rep)
-}
diff --git a/pkg/innerring/processors/audit/scheduler.go b/pkg/innerring/processors/audit/scheduler.go
deleted file mode 100644
index dd660d4e19..0000000000
--- a/pkg/innerring/processors/audit/scheduler.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package audit
-
-import (
- "errors"
- "fmt"
- "sort"
- "strings"
-
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "go.uber.org/zap"
-)
-
-var ErrInvalidIRNode = errors.New("node is not in the inner ring list")
-
-func (ap *Processor) selectContainersToAudit(epoch uint64) ([]cid.ID, error) {
- containers, err := ap.containerClient.List(nil)
- if err != nil {
- return nil, fmt.Errorf("can't get list of containers to start audit: %w", err)
- }
-
- // consider getting extra information about container complexity from
- // audit contract there
- ap.log.Debug("container listing finished",
- zap.Int("total amount", len(containers)),
- )
-
- sort.Slice(containers, func(i, j int) bool {
- return strings.Compare(containers[i].EncodeToString(), containers[j].EncodeToString()) < 0
- })
-
- ind := ap.irList.InnerRingIndex()
- irSize := ap.irList.InnerRingSize()
-
- if ind < 0 || ind >= irSize {
- return nil, ErrInvalidIRNode
- }
-
- return Select(containers, epoch, uint64(ind), uint64(irSize)), nil
-}
-
-func Select(ids []cid.ID, epoch, index, size uint64) []cid.ID {
- if index >= size {
- return nil
- }
-
- var a, b uint64
-
- ln := uint64(len(ids))
- pivot := ln % size
- delta := ln / size
-
- index = (index + epoch) % size
- if index < pivot {
- a = delta + 1
- } else {
- a = delta
- b = pivot
- }
-
- from := a*index + b
- to := a*(index+1) + b
-
- return ids[from:to]
-}
diff --git a/pkg/innerring/processors/audit/scheduler_test.go b/pkg/innerring/processors/audit/scheduler_test.go
deleted file mode 100644
index 51461beb71..0000000000
--- a/pkg/innerring/processors/audit/scheduler_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package audit_test
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestSelect(t *testing.T) {
- cids := generateContainers(10)
-
- t.Run("invalid input", func(t *testing.T) {
- require.Empty(t, audit.Select(cids, 0, 0, 0))
- })
-
- t.Run("even split", func(t *testing.T) {
- const irSize = 5 // every node takes two audit nodes
-
- m := hitMap(cids)
-
- for i := 0; i < irSize; i++ {
- s := audit.Select(cids, 0, uint64(i), irSize)
- require.Equal(t, len(cids)/irSize, len(s))
-
- for _, id := range s {
- n, ok := m[id.EncodeToString()]
- require.True(t, ok)
- require.Equal(t, 0, n)
- m[id.EncodeToString()] = 1
- }
- }
-
- require.True(t, allHit(m))
- })
-
- t.Run("odd split", func(t *testing.T) {
- const irSize = 3
-
- m := hitMap(cids)
-
- for i := 0; i < irSize; i++ {
- s := audit.Select(cids, 0, uint64(i), irSize)
-
- for _, id := range s {
- n, ok := m[id.EncodeToString()]
- require.True(t, ok)
- require.Equal(t, 0, n)
- m[id.EncodeToString()] = 1
- }
- }
-
- require.True(t, allHit(m))
- })
-
- t.Run("epoch shift", func(t *testing.T) {
- const irSize = 4
-
- m := hitMap(cids)
-
- for i := 0; i < irSize; i++ {
- s := audit.Select(cids, uint64(i), 0, irSize)
-
- for _, id := range s {
- n, ok := m[id.EncodeToString()]
- require.True(t, ok)
- require.Equal(t, 0, n)
- m[id.EncodeToString()] = 1
- }
- }
-
- require.True(t, allHit(m))
- })
-}
-
-func generateContainers(n int) []cid.ID {
- result := make([]cid.ID, n)
-
- for i := 0; i < n; i++ {
- result[i] = cidtest.ID()
- }
-
- return result
-}
-
-func hitMap(ids []cid.ID) map[string]int {
- result := make(map[string]int, len(ids))
-
- for _, id := range ids {
- result[id.EncodeToString()] = 0
- }
-
- return result
-}
-
-func allHit(m map[string]int) bool {
- for _, v := range m {
- if v == 0 {
- return false
- }
- }
-
- return true
-}
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
index 4c5a2ddc63..b5d05a02e7 100644
--- a/pkg/innerring/processors/balance/handlers.go
+++ b/pkg/innerring/processors/balance/handlers.go
@@ -1,25 +1,30 @@
package balance
import (
+ "context"
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
"go.uber.org/zap"
)
-func (bp *Processor) handleLock(ev event.Event) {
+func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
lock := ev.(balanceEvent.Lock)
- bp.log.Info("notification",
+ bp.log.Info(ctx, logs.Notification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
// send an event to the worker pool
- err := bp.pool.Submit(func() { bp.processLock(&lock) })
+ err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
+ return bp.processLock(ctx, &lock)
+ })
if err != nil {
// the system can be moved into a controlled degradation stage
- bp.log.Warn("balance worker pool drained",
+ bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained,
zap.Int("capacity", bp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go
new file mode 100644
index 0000000000..0fd23d8abd
--- /dev/null
+++ b/pkg/innerring/processors/balance/handlers_test.go
@@ -0,0 +1,90 @@
+package balance
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
+ balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
+ t.Parallel()
+ as := &testAlphabetState{
+ isAlphabet: true,
+ }
+ conv := &testPrecisionConverter{}
+ cl := &testFrostFSContractClient{}
+ bsc := util.Uint160{100}
+
+ processor, err := New(&Params{
+ Log: test.NewLogger(t),
+ PoolSize: 2,
+ FrostFSClient: cl,
+ BalanceSC: bsc,
+ AlphabetState: as,
+ Converter: conv,
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ processor.handleLock(context.Background(), balanceEvent.Lock{})
+
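+ // the lock handler runs asynchronously on the worker pool; wait for
+ // it to finish before checking the mock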
+ for processor.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.Equal(t, 1, cl.chequeCalls, "invalid Cheque calls")
+}
+
+func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
+ t.Parallel()
+ as := &testAlphabetState{}
+ conv := &testPrecisionConverter{}
+ cl := &testFrostFSContractClient{}
+ bsc := util.Uint160{100}
+
+ processor, err := New(&Params{
+ Log: test.NewLogger(t),
+ PoolSize: 2,
+ FrostFSClient: cl,
+ BalanceSC: bsc,
+ AlphabetState: as,
+ Converter: conv,
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ processor.handleLock(context.Background(), balanceEvent.Lock{})
+
+ for processor.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.Equal(t, 0, cl.chequeCalls, "invalid Cheque calls")
+}
+
+type testAlphabetState struct {
+ isAlphabet bool
+}
+
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+ return s.isAlphabet
+}
+
+type testPrecisionConverter struct{}
+
+func (c *testPrecisionConverter) ToFixed8(v int64) int64 {
+ return v
+}
+
+type testFrostFSContractClient struct {
+ chequeCalls int
+}
+
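+// Cheque counts invocations instead of calling the FrostFS contract.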
+func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
+ c.chequeCalls++
+ return nil
+}
diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go
index 754dda34ad..60475908ce 100644
--- a/pkg/innerring/processors/balance/process_assets.go
+++ b/pkg/innerring/processors/balance/process_assets.go
@@ -1,6 +1,9 @@
package balance
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
"go.uber.org/zap"
@@ -8,10 +11,10 @@ import (
// Process lock event by invoking Cheque method in main net to send assets
// back to the withdraw issuer.
-func (bp *Processor) processLock(lock *balanceEvent.Lock) {
- if !bp.alphabetState.IsAlphabet() {
- bp.log.Info("non alphabet mode, ignore balance lock")
- return
+func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
+ if !bp.alphabetState.IsAlphabet(ctx) {
+ bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
+ return true
}
prm := frostfsContract.ChequePrm{}
@@ -22,8 +25,11 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) {
prm.SetLock(lock.LockAccount())
prm.SetHash(lock.TxHash())
- err := bp.frostfsClient.Cheque(prm)
+ err := bp.frostfsClient.Cheque(ctx, prm)
if err != nil {
- bp.log.Error("can't send lock asset tx", zap.Error(err))
+ bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
+ return false
}
+
+ return true
}
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
index 2527b7ec39..34203b74fb 100644
--- a/pkg/innerring/processors/balance/processor.go
+++ b/pkg/innerring/processors/balance/processor.go
@@ -1,22 +1,23 @@
package balance
import (
+ "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@@ -24,11 +25,16 @@ type (
ToFixed8(int64) int64
}
+ FrostFSClient interface {
+ Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
+ }
+
// Processor of events produced by balance contract in the morphchain.
Processor struct {
log *logger.Logger
+ metrics metrics.Register
pool *ants.Pool
- frostfsClient *frostfscontract.Client
+ frostfsClient FrostFSClient
balanceSC util.Uint160
alphabetState AlphabetState
converter PrecisionConverter
@@ -37,8 +43,9 @@ type (
// Params of the processor constructor.
Params struct {
Log *logger.Logger
+ Metrics metrics.Register
PoolSize int
- FrostFSClient *frostfscontract.Client
+ FrostFSClient FrostFSClient
BalanceSC util.Uint160
AlphabetState AlphabetState
Converter PrecisionConverter
@@ -60,15 +67,19 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
- p.Log.Debug("balance worker pool", zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)
}
+ metricsRegister := p.Metrics
+ if metricsRegister == nil {
+ metricsRegister = metrics.DefaultRegister{}
+ }
+
return &Processor{
log: p.Log,
+ metrics: metricsRegister,
pool: pool,
frostfsClient: p.FrostFSClient,
balanceSC: p.BalanceSC,
@@ -77,32 +88,16 @@ func New(p *Params) (*Processor, error) {
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- var parsers []event.NotificationParserInfo
-
- // new lock event
- lock := event.NotificationParserInfo{}
- lock.SetType(lockNotification)
- lock.SetScriptHash(bp.balanceSC)
- lock.SetParser(balanceEvent.ParseLock)
- parsers = append(parsers, lock)
-
- return parsers
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- var handlers []event.NotificationHandlerInfo
-
- // lock handler
- lock := event.NotificationHandlerInfo{}
- lock.SetType(lockNotification)
- lock.SetScriptHash(bp.balanceSC)
- lock.SetHandler(bp.handleLock)
- handlers = append(handlers, lock)
-
- return handlers
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: bp.balanceSC,
+ Type: lockNotification,
+ Parser: balanceEvent.ParseLock,
+ Handlers: []event.Handler{bp.handleLock},
+ },
+ }
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
@@ -114,8 +109,3 @@ func (bp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
func (bp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (bp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go
index bfbf6c7828..5334b9a1fe 100644
--- a/pkg/innerring/processors/container/common.go
+++ b/pkg/innerring/processors/container/common.go
@@ -1,11 +1,11 @@
package container
import (
+ "context"
"crypto/ecdsa"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -46,7 +46,7 @@ type signatureVerificationData struct {
// - v.binPublicKey is a public session key
// - session context corresponds to the container and verb in v
// - session is "alive"
-func (cp *Processor) verifySignature(v signatureVerificationData) error {
+func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error {
var err error
var key frostfsecdsa.PublicKeyRFC6979
keyProvided := v.binPublicKey != nil
@@ -59,49 +59,10 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error {
}
if len(v.binTokenSession) > 0 {
- var tok session.Container
-
- err = tok.Unmarshal(v.binTokenSession)
- if err != nil {
- return fmt.Errorf("decode session token: %w", err)
- }
-
- if !tok.VerifySignature() {
- return errors.New("invalid session token signature")
- }
-
- // FIXME(@cthulhu-rider): #1387 check token is signed by container owner, see neofs-sdk-go#233
-
- if keyProvided && !tok.AssertAuthKey(&key) {
- return errors.New("signed with a non-session key")
- }
-
- if !tok.AssertVerb(v.verb) {
- return errWrongSessionVerb
- }
-
- if v.idContainerSet && !tok.AppliedTo(v.idContainer) {
- return errWrongCID
- }
-
- if !session.IssuedBy(tok, v.ownerContainer) {
- return errors.New("owner differs with token owner")
- }
-
- err = cp.checkTokenLifetime(tok)
- if err != nil {
- return fmt.Errorf("check session lifetime: %w", err)
- }
-
- if !tok.VerifySessionDataSignature(v.signedData, v.signature) {
- return errors.New("invalid signature calculated with session key")
- }
-
- return nil
+ return cp.verifyByTokenSession(ctx, v, &key, keyProvided)
}
if keyProvided {
- // TODO(@cthulhu-rider): #1387 use another approach after neofs-sdk-go#233
var idFromKey user.ID
user.IDFromKey(&idFromKey, (ecdsa.PublicKey)(key))
@@ -112,27 +73,13 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error {
return errors.New("invalid signature calculated by container owner's key")
}
- } else {
- var prm frostfsid.AccountKeysPrm
- prm.SetID(v.ownerContainer)
-
- ownerKeys, err := cp.idClient.AccountKeys(prm)
- if err != nil {
- return fmt.Errorf("receive owner keys %s: %w", v.ownerContainer, err)
- }
-
- for i := range ownerKeys {
- if (*frostfsecdsa.PublicKeyRFC6979)(ownerKeys[i]).Verify(v.signedData, v.signature) {
- return nil
- }
- }
}
return errors.New("signature is invalid or calculated with the key not bound to the container owner")
}
-func (cp *Processor) checkTokenLifetime(token session.Container) error {
- curEpoch, err := cp.netState.Epoch()
+func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error {
+ curEpoch, err := cp.netState.Epoch(ctx)
if err != nil {
return fmt.Errorf("could not read current epoch: %w", err)
}
@@ -143,3 +90,43 @@ func (cp *Processor) checkTokenLifetime(token session.Container) error {
return nil
}
+
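+// verifyByTokenSession authenticates a container operation signed with a
+// session key: it checks the token signature, bound auth key, verb,
+// target container, issuer and lifetime, then the data signature itself.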
+func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
+ var tok session.Container
+
+ err := tok.Unmarshal(v.binTokenSession)
+ if err != nil {
+ return fmt.Errorf("decode session token: %w", err)
+ }
+
+ if !tok.VerifySignature() {
+ return errors.New("invalid session token signature")
+ }
+
+ if keyProvided && !tok.AssertAuthKey(key) {
+ return errors.New("signed with a non-session key")
+ }
+
+ if !tok.AssertVerb(v.verb) {
+ return errWrongSessionVerb
+ }
+
+ if v.idContainerSet && !tok.AppliedTo(v.idContainer) {
+ return errWrongCID
+ }
+
+ if !session.IssuedBy(tok, v.ownerContainer) {
+ return errors.New("owner differs with token owner")
+ }
+
+ err = cp.checkTokenLifetime(ctx, tok)
+ if err != nil {
+ return fmt.Errorf("check session lifetime: %w", err)
+ }
+
+ if !tok.VerifySessionDataSignature(v.signedData, v.signature) {
+ return errors.New("invalid signature calculated with session key")
+ }
+
+ return nil
+}
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index f9f8b58412..bb038a3cbf 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -1,63 +1,51 @@
package container
import (
+ "context"
"crypto/sha256"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"github.com/mr-tron/base58"
"go.uber.org/zap"
)
-func (cp *Processor) handlePut(ev event.Event) {
+func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
- cp.log.Info("notification",
+ cp.log.Info(ctx, logs.Notification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
// send an event to the worker pool
- err := cp.pool.Submit(func() { cp.processContainerPut(put) })
+ err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
+ return cp.processContainerPut(ctx, put)
+ })
if err != nil {
// the system can be moved into a controlled degradation stage
- cp.log.Warn("container processor worker pool drained",
+ cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
-func (cp *Processor) handleDelete(ev event.Event) {
+func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
del := ev.(containerEvent.Delete)
- cp.log.Info("notification",
+ cp.log.Info(ctx, logs.Notification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
// send an event to the worker pool
- err := cp.pool.Submit(func() { cp.processContainerDelete(&del) })
- if err != nil {
- // there system can be moved into controlled degradation stage
- cp.log.Warn("container processor worker pool drained",
- zap.Int("capacity", cp.pool.Cap()))
- }
-}
-
-func (cp *Processor) handleSetEACL(ev event.Event) {
- e := ev.(containerEvent.SetEACL)
-
- cp.log.Info("notification",
- zap.String("type", "set EACL"),
- )
-
- // send an event to the worker pool
-
- err := cp.pool.Submit(func() {
- cp.processSetEACL(e)
+ err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
+ return cp.processContainerDelete(ctx, del)
})
if err != nil {
// the system can be moved into a controlled degradation stage
- cp.log.Warn("container processor worker pool drained",
+ cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go
new file mode 100644
index 0000000000..1b3842eb07
--- /dev/null
+++ b/pkg/innerring/processors/container/handlers_test.go
@@ -0,0 +1,242 @@
+package container
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "encoding/hex"
+ "testing"
+ "time"
+
+ frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/network/payload"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPutEvent(t *testing.T) {
+ t.Parallel()
+ nst := &testNetworkState{
+ homHashDisabled: true,
+ epoch: 100,
+ }
+ mc := &testMorphClient{}
+
+ proc, err := New(&Params{
+ Log: test.NewLogger(t),
+ PoolSize: 2,
+ AlphabetState: &testAlphabetState{isAlphabet: true},
+ NetworkState: nst,
+ ContainerClient: &testContainerClient{},
+ MorphClient: mc,
+ FrostFSIDClient: &testFrostFSIDClient{},
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ p, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ var usr user.ID
+ user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey()))
+
+ var pp netmap.PlacementPolicy
+ pp.AddReplicas(netmap.ReplicaDescriptor{})
+
+ var cnr containerSDK.Container
+ cnr.Init()
+ cnr.SetOwner(usr)
+ cnr.SetPlacementPolicy(pp)
+ cnr.SetBasicACL(acl.Private)
+ containerSDK.DisableHomomorphicHashing(&cnr)
+
+ nr := &payload.P2PNotaryRequest{
+ MainTransaction: &transaction.Transaction{},
+ }
+
+ event := &testPutEvent{
+ cnr: &cnr,
+ pk: p,
+ st: nil,
+ nr: nr,
+ }
+
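+ // a put event received via the notary service carries the main
+ // transaction that the processor is expected to co-sign and invoke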
+ proc.handlePut(context.Background(), event)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.EqualValues(t, []*transaction.Transaction{nr.MainTransaction}, mc.transactions, "invalid notary requests")
+}
+
+func TestDeleteEvent(t *testing.T) {
+ t.Parallel()
+ nst := &testNetworkState{
+ homHashDisabled: true,
+ epoch: 100,
+ }
+ cc := &testContainerClient{
+ get: make(map[string]*containercore.Container),
+ }
+
+ p, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ mc := &testMorphClient{}
+
+ proc, err := New(&Params{
+ Log: test.NewLogger(t),
+ PoolSize: 2,
+ AlphabetState: &testAlphabetState{isAlphabet: true},
+ NetworkState: nst,
+ ContainerClient: cc,
+ MorphClient: mc,
+ FrostFSIDClient: &testFrostFSIDClient{},
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ var usr user.ID
+ user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey()))
+
+ var pp netmap.PlacementPolicy
+ pp.AddReplicas(netmap.ReplicaDescriptor{})
+
+ var cnr containerSDK.Container
+ cnr.Init()
+ cnr.SetOwner(usr)
+ cnr.SetPlacementPolicy(pp)
+ cnr.SetBasicACL(acl.Private)
+ containerSDK.DisableHomomorphicHashing(&cnr)
+
+ var id cid.ID
+ containerSDK.CalculateID(&id, cnr)
+ cidBin := make([]byte, 32)
+ id.Encode(cidBin)
+
+ nr := &payload.P2PNotaryRequest{
+ MainTransaction: &transaction.Transaction{},
+ }
+
+ ev := containerEvent.Delete{
+ ContainerIDValue: cidBin,
+ SignatureValue: p.Sign(cidBin),
+ NotaryRequestValue: nr,
+ PublicKeyValue: p.PublicKey().Bytes(),
+ }
+
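+ // store the container with its owner's signature in the mock client
+ // so the delete check can resolve and verify it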
+ var signature frostfscrypto.Signature
+ signer := frostfsecdsa.Signer(p.PrivateKey)
+ require.NoError(t, signature.Calculate(signer, ev.ContainerID()), "failed to calculate signature")
+ cc.get[hex.EncodeToString(ev.ContainerID())] = &containercore.Container{
+ Value: cnr,
+ Signature: signature,
+ }
+
+ proc.handleDelete(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ var expectedDelete cntClient.DeletePrm
+ expectedDelete.SetCID(ev.ContainerID())
+ expectedDelete.SetSignature(ev.Signature())
+
+ require.EqualValues(t, []*transaction.Transaction{nr.MainTransaction}, mc.transactions, "invalid notary requests")
+}
+
+type testAlphabetState struct {
+ isAlphabet bool
+}
+
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+ return s.isAlphabet
+}
+
+type testNetworkState struct {
+ homHashDisabled bool
+ epoch uint64
+}
+
+func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) {
+ return s.homHashDisabled, nil
+}
+
+func (s *testNetworkState) Epoch(context.Context) (uint64, error) {
+ return s.epoch, nil
+}
+
+type testContainerClient struct {
+ contractAddress util.Uint160
+ get map[string]*containercore.Container
+}
+
+func (c *testContainerClient) ContractAddress() util.Uint160 {
+ return c.contractAddress
+}
+
+func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
+ key := hex.EncodeToString(cid)
+ if cont, found := c.get[key]; found {
+ return cont, nil
+ }
+ return nil, new(apistatus.ContainerNotFound)
+}
+
+var _ putEvent = &testPutEvent{}
+
+type testPutEvent struct {
+ cnr *containerSDK.Container
+ pk *keys.PrivateKey
+ st []byte
+ nr *payload.P2PNotaryRequest
+}
+
+func (e *testPutEvent) MorphEvent() {}
+
+func (e *testPutEvent) Container() []byte {
+ return e.cnr.Marshal()
+}
+
+func (e *testPutEvent) PublicKey() []byte {
+ return e.pk.PublicKey().Bytes()
+}
+
+func (e *testPutEvent) Signature() []byte {
+ return e.pk.Sign(e.cnr.Marshal())
+}
+
+func (e *testPutEvent) SessionToken() []byte {
+ return e.st
+}
+
+func (e *testPutEvent) NotaryRequest() *payload.P2PNotaryRequest {
+ return e.nr
+}
+
+type testMorphClient struct {
+ transactions []*transaction.Transaction
+}
+
+func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
+ c.transactions = append(c.transactions, mainTx)
+ return nil
+}
+
+type testFrostFSIDClient struct{}
+
+func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
+ return &frostfsidclient.Subject{}, nil
+}
diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go
index 8b244aa5dc..8e4ab2623b 100644
--- a/pkg/innerring/processors/container/process_container.go
+++ b/pkg/innerring/processors/container/process_container.go
@@ -1,17 +1,19 @@
package container
import (
+ "context"
+ "errors"
"fmt"
+ "strings"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
"github.com/nspcc-dev/neo-go/pkg/network/payload"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
@@ -31,32 +33,41 @@ type putContainerContext struct {
d containerSDK.Domain
}
+var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner namespaces do not match")
+
// Process a new container from the user by checking the container sanity
// and sending approve tx back to the morph.
-func (cp *Processor) processContainerPut(put putEvent) {
- if !cp.alphabetState.IsAlphabet() {
- cp.log.Info("non alphabet mode, ignore container put")
- return
+func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
+ if !cp.alphabetState.IsAlphabet(ctx) {
+ cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
+ return true
}
- ctx := &putContainerContext{
+ pctx := &putContainerContext{
e: put,
}
- err := cp.checkPutContainer(ctx)
+ err := cp.checkPutContainer(ctx, pctx)
if err != nil {
- cp.log.Error("put container check failed",
- zap.String("error", err.Error()),
+ cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
+ zap.Error(err),
)
- return
+ return false
}
- cp.approvePutContainer(ctx)
+ if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
+ cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
+ zap.Error(err),
+ )
+ return false
+ }
+
+ return true
}
-func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
- binCnr := ctx.e.Container()
+func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error {
+ binCnr := pctx.e.Container()
var cnr containerSDK.Container
err := cnr.Unmarshal(binCnr)
@@ -64,32 +75,26 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
return fmt.Errorf("invalid binary container: %w", err)
}
- err = cp.verifySignature(signatureVerificationData{
+ err = cp.verifySignature(ctx, signatureVerificationData{
ownerContainer: cnr.Owner(),
verb: session.VerbContainerPut,
- binTokenSession: ctx.e.SessionToken(),
- binPublicKey: ctx.e.PublicKey(),
- signature: ctx.e.Signature(),
+ binTokenSession: pctx.e.SessionToken(),
+ binPublicKey: pctx.e.PublicKey(),
+ signature: pctx.e.Signature(),
signedData: binCnr,
})
if err != nil {
return fmt.Errorf("auth container creation: %w", err)
}
- // check owner allowance in the subnetwork
- err = checkSubnet(cp.subnetClient, cnr)
- if err != nil {
- return fmt.Errorf("incorrect subnetwork: %w", err)
- }
-
// check homomorphic hashing setting
- err = checkHomomorphicHashing(cp.netState, cnr)
+ err = checkHomomorphicHashing(ctx, cp.netState, cnr)
if err != nil {
return fmt.Errorf("incorrect homomorphic hashing setting: %w", err)
}
// check native name and zone
- err = checkNNS(ctx, cnr)
+ err = cp.checkNNS(ctx, pctx, cnr)
if err != nil {
return fmt.Errorf("NNS: %w", err)
}
@@ -97,55 +102,35 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
return nil
}
-func (cp *Processor) approvePutContainer(ctx *putContainerContext) {
- e := ctx.e
-
- var err error
-
- prm := cntClient.PutPrm{}
-
- prm.SetContainer(e.Container())
- prm.SetKey(e.PublicKey())
- prm.SetSignature(e.Signature())
- prm.SetToken(e.SessionToken())
- prm.SetName(ctx.d.Name())
- prm.SetZone(ctx.d.Zone())
-
- if nr := e.NotaryRequest(); nr != nil {
- // put event was received via Notary service
- err = cp.cnrClient.Morph().NotarySignAndInvokeTX(nr.MainTransaction)
- } else {
- // put event was received via notification service
- err = cp.cnrClient.Put(prm)
- }
- if err != nil {
- cp.log.Error("could not approve put container",
- zap.String("error", err.Error()),
- )
- }
-}
-
// Process delete container operation from the user by checking container sanity
// and sending approve tx back to morph.
-func (cp *Processor) processContainerDelete(e *containerEvent.Delete) {
- if !cp.alphabetState.IsAlphabet() {
- cp.log.Info("non alphabet mode, ignore container delete")
- return
+func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
+ if !cp.alphabetState.IsAlphabet(ctx) {
+ cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
+ return true
}
- err := cp.checkDeleteContainer(e)
+ err := cp.checkDeleteContainer(ctx, e)
if err != nil {
- cp.log.Error("delete container check failed",
- zap.String("error", err.Error()),
+ cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
+ zap.Error(err),
)
- return
+ return false
}
- cp.approveDeleteContainer(e)
+ if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
+ cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
+ zap.Error(err),
+ )
+
+ return false
+ }
+
+ return true
}
-func (cp *Processor) checkDeleteContainer(e *containerEvent.Delete) error {
+func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error {
binCnr := e.ContainerID()
var idCnr cid.ID
@@ -156,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(e *containerEvent.Delete) error {
}
// receive owner of the related container
- cnr, err := cp.cnrClient.Get(binCnr)
+ cnr, err := cp.cnrClient.Get(ctx, binCnr)
if err != nil {
return fmt.Errorf("could not receive the container: %w", err)
}
- err = cp.verifySignature(signatureVerificationData{
+ err = cp.verifySignature(ctx, signatureVerificationData{
ownerContainer: cnr.Value.Owner(),
verb: session.VerbContainerDelete,
idContainerSet: true,
@@ -169,6 +154,7 @@ func (cp *Processor) checkDeleteContainer(e *containerEvent.Delete) error {
binTokenSession: e.SessionToken(),
signature: e.Signature(),
signedData: binCnr,
+ binPublicKey: e.PublicKeyValue,
})
if err != nil {
return fmt.Errorf("auth container removal: %w", err)
@@ -177,80 +163,53 @@ func (cp *Processor) checkDeleteContainer(e *containerEvent.Delete) error {
return nil
}
-func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) {
- var err error
-
- prm := cntClient.DeletePrm{}
-
- prm.SetCID(e.ContainerID())
- prm.SetSignature(e.Signature())
- prm.SetToken(e.SessionToken())
-
- if nr := e.NotaryRequest(); nr != nil {
- // delete event was received via Notary service
- err = cp.cnrClient.Morph().NotarySignAndInvokeTX(nr.MainTransaction)
- } else {
- // delete event was received via notification service
- err = cp.cnrClient.Delete(prm)
- }
- if err != nil {
- cp.log.Error("could not approve delete container",
- zap.String("error", err.Error()),
- )
- }
-}
-
-func checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error {
+func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error {
// fetch domain info
- ctx.d = containerSDK.ReadDomain(cnr)
+ pctx.d = containerSDK.ReadDomain(cnr)
// if PutNamed event => check if values in container correspond to args
- if named, ok := ctx.e.(interface {
+ if named, ok := pctx.e.(interface {
Name() string
Zone() string
}); ok {
- if name := named.Name(); name != ctx.d.Name() {
- return fmt.Errorf("names differ %s/%s", name, ctx.d.Name())
+ if name := named.Name(); name != pctx.d.Name() {
+ return fmt.Errorf("names differ %s/%s", name, pctx.d.Name())
}
- if zone := named.Zone(); zone != ctx.d.Zone() {
- return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone())
+ if zone := named.Zone(); zone != pctx.d.Zone() {
+ return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone())
}
}
- return nil
-}
+ addr, err := util.Uint160DecodeBytesBE(cnr.Owner().WalletBytes()[1 : 1+util.Uint160Size])
+ if err != nil {
+ return fmt.Errorf("could not get container owner address: %w", err)
+ }
-func checkSubnet(subCli *morphsubnet.Client, cnr containerSDK.Container) error {
- prm := morphsubnet.UserAllowedPrm{}
+ subject, err := cp.frostFSIDClient.GetSubject(ctx, addr)
+ if err != nil {
+ return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
+ }
- subID := cnr.PlacementPolicy().Subnet()
- if subnetid.IsZero(subID) {
+ namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns")
+ if !hasNamespace {
return nil
}
- prm.SetID(subID.Marshal())
- prm.SetClient(cnr.Owner().WalletBytes())
-
- res, err := subCli.UserAllowed(prm)
- if err != nil {
- return fmt.Errorf("could not check user in contract: %w", err)
- }
-
- if !res.Allowed() {
- return fmt.Errorf("user is not allowed to create containers in %v subnetwork", subID)
+ if subject.Namespace != namespace {
+ return errContainerAndOwnerNamespaceDontMatch
}
return nil
}
-func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error {
- netSetting, err := ns.HomomorphicHashDisabled()
+func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error {
+ netSetting, err := ns.HomomorphicHashDisabled(ctx)
if err != nil {
return fmt.Errorf("could not get setting in contract: %w", err)
}
- if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting {
+ if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting {
return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting)
}
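
The NNS check above ties a container's ".ns" zone to the owner's namespace recorded in the FrostFSID contract. A minimal self-contained sketch of that rule follows; only `strings.CutSuffix` and the mismatch error mirror the diff, all other names are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNamespaceMismatch = errors.New("container and owner namespaces do not match")

// checkOwnerNamespace mirrors the validation step added in checkNNS: a zone
// of the form "<namespace>.ns" must match the namespace registered for the
// container owner; zones without the ".ns" suffix are exempt from the check.
func checkOwnerNamespace(zone, subjectNamespace string) error {
	namespace, hasNamespace := strings.CutSuffix(zone, ".ns")
	if !hasNamespace {
		return nil // not a namespaced zone, nothing to verify
	}
	if subjectNamespace != namespace {
		return errNamespaceMismatch
	}
	return nil
}

func main() {
	fmt.Println(checkOwnerNamespace("team-a.ns", "team-a")) // <nil>
	fmt.Println(checkOwnerNamespace("team-a.ns", "team-b")) // namespaces do not match
	fmt.Println(checkOwnerNamespace("container", "team-a")) // <nil>: no ".ns" suffix
}
```
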
diff --git a/pkg/innerring/processors/container/process_eacl.go b/pkg/innerring/processors/container/process_eacl.go
deleted file mode 100644
index e8bbb5db65..0000000000
--- a/pkg/innerring/processors/container/process_eacl.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package container
-
-import (
- "errors"
- "fmt"
-
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "go.uber.org/zap"
-)
-
-func (cp *Processor) processSetEACL(e container.SetEACL) {
- if !cp.alphabetState.IsAlphabet() {
- cp.log.Info("non alphabet mode, ignore set EACL")
- return
- }
-
- err := cp.checkSetEACL(e)
- if err != nil {
- cp.log.Error("set EACL check failed",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- cp.approveSetEACL(e)
-}
-
-func (cp *Processor) checkSetEACL(e container.SetEACL) error {
- binTable := e.Table()
-
- // unmarshal table
- table := eacl.NewTable()
-
- err := table.Unmarshal(binTable)
- if err != nil {
- return fmt.Errorf("invalid binary table: %w", err)
- }
-
- idCnr, ok := table.CID()
- if !ok {
- return errors.New("missing container ID in eACL table")
- }
-
- // receive owner of the related container
- cnr, err := cntClient.Get(cp.cnrClient, idCnr)
- if err != nil {
- return fmt.Errorf("could not receive the container: %w", err)
- }
-
- // ACL extensions can be disabled by basic ACL, check it
- if !cnr.Value.BasicACL().Extendable() {
- return errors.New("ACL extension disabled by container basic ACL")
- }
-
- err = cp.verifySignature(signatureVerificationData{
- ownerContainer: cnr.Value.Owner(),
- verb: session.VerbContainerSetEACL,
- idContainerSet: true,
- idContainer: idCnr,
- binTokenSession: e.SessionToken(),
- binPublicKey: e.PublicKey(),
- signature: e.Signature(),
- signedData: binTable,
- })
- if err != nil {
- return fmt.Errorf("auth eACL table setting: %w", err)
- }
-
- return nil
-}
-
-func (cp *Processor) approveSetEACL(e container.SetEACL) {
- var err error
-
- prm := cntClient.PutEACLPrm{}
-
- prm.SetTable(e.Table())
- prm.SetKey(e.PublicKey())
- prm.SetSignature(e.Signature())
- prm.SetToken(e.SessionToken())
-
- if nr := e.NotaryRequest(); nr != nil {
- // setEACL event was received via Notary service
- err = cp.cnrClient.Morph().NotarySignAndInvokeTX(nr.MainTransaction)
- } else {
- // setEACL event was received via notification service
- err = cp.cnrClient.PutEACL(prm)
- }
- if err != nil {
- cp.log.Error("could not approve set EACL",
- zap.String("error", err.Error()),
- )
- }
-}
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index ae0d287298..9be93baa4b 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -1,48 +1,63 @@
package container
import (
+ "context"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
- morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
+ frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
+ }
+
+ ContClient interface {
+ ContractAddress() util.Uint160
+ Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+ }
+
+ MorphClient interface {
+ NotarySignAndInvokeTX(mainTx *transaction.Transaction) error
+ }
+
+ FrostFSIDClient interface {
+ GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error)
}
// Processor of events produced by container contract in the sidechain.
Processor struct {
- log *logger.Logger
- pool *ants.Pool
- alphabetState AlphabetState
- cnrClient *container.Client // notary must be enabled
- idClient *frostfsid.Client
- subnetClient *morphsubnet.Client
- netState NetworkState
- notaryDisabled bool
+ log *logger.Logger
+ metrics metrics.Register
+ pool *ants.Pool
+ alphabetState AlphabetState
+ cnrClient ContClient // notary must be enabled
+ morphClient MorphClient
+ netState NetworkState
+ frostFSIDClient FrostFSIDClient
}
// Params of the processor constructor.
Params struct {
Log *logger.Logger
+ Metrics metrics.Register
PoolSize int
AlphabetState AlphabetState
- ContainerClient *container.Client
- FrostFSIDClient *frostfsid.Client
- SubnetClient *morphsubnet.Client
+ ContainerClient ContClient
+ MorphClient MorphClient
NetworkState NetworkState
- NotaryDisabled bool
+ FrostFSIDClient FrostFSIDClient
}
)
@@ -53,7 +68,7 @@ type NetworkState interface {
//
// Must return any error encountered
// which did not allow reading the value.
- Epoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
// HomomorphicHashDisabled must return boolean that
// represents homomorphic network state:
@@ -61,16 +76,9 @@ type NetworkState interface {
// * false if hashing is enabled.
//
// which did not allow reading the value.
- HomomorphicHashDisabled() (bool, error)
+ HomomorphicHashDisabled(ctx context.Context) (bool, error)
}
-const (
- putNotification = "containerPut"
- deleteNotification = "containerDelete"
-
- setEACLNotification = "setEACL"
-)
-
// New creates a container contract processor instance.
func New(p *Params) (*Processor, error) {
switch {
@@ -80,95 +88,39 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: global state is not set")
case p.ContainerClient == nil:
return nil, errors.New("ir/container: Container client is not set")
- case p.FrostFSIDClient == nil:
- return nil, errors.New("ir/container: FrostFS ID client is not set")
+ case p.MorphClient == nil:
+ return nil, errors.New("ir/container: Morph client is not set")
case p.NetworkState == nil:
return nil, errors.New("ir/container: network state is not set")
- case p.SubnetClient == nil:
- return nil, errors.New("ir/container: subnet client is not set")
+ case p.FrostFSIDClient == nil:
+ return nil, errors.New("ir/container: FrostFSID client is not set")
}
- p.Log.Debug("container worker pool", zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)
}
- return &Processor{
- log: p.Log,
- pool: pool,
- alphabetState: p.AlphabetState,
- cnrClient: p.ContainerClient,
- idClient: p.FrostFSIDClient,
- netState: p.NetworkState,
- notaryDisabled: p.NotaryDisabled,
- subnetClient: p.SubnetClient,
- }, nil
-}
-
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- if !cp.notaryDisabled {
- return nil
+ metricsRegister := p.Metrics
+ if metricsRegister == nil {
+ metricsRegister = metrics.DefaultRegister{}
}
- var (
- parsers = make([]event.NotificationParserInfo, 0, 3)
-
- p event.NotificationParserInfo
- )
-
- p.SetScriptHash(cp.cnrClient.ContractAddress())
-
- // container put
- p.SetType(event.TypeFromString(putNotification))
- p.SetParser(containerEvent.ParsePut)
- parsers = append(parsers, p)
-
- // container delete
- p.SetType(event.TypeFromString(deleteNotification))
- p.SetParser(containerEvent.ParseDelete)
- parsers = append(parsers, p)
-
- // set eACL
- p.SetType(event.TypeFromString(setEACLNotification))
- p.SetParser(containerEvent.ParseSetEACL)
- parsers = append(parsers, p)
-
- return parsers
+ return &Processor{
+ log: p.Log,
+ metrics: metricsRegister,
+ pool: pool,
+ alphabetState: p.AlphabetState,
+ cnrClient: p.ContainerClient,
+ netState: p.NetworkState,
+ morphClient: p.MorphClient,
+ frostFSIDClient: p.FrostFSIDClient,
+ }, nil
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- if !cp.notaryDisabled {
- return nil
- }
-
- var (
- handlers = make([]event.NotificationHandlerInfo, 0, 3)
-
- h event.NotificationHandlerInfo
- )
-
- h.SetScriptHash(cp.cnrClient.ContractAddress())
-
- // container put
- h.SetType(event.TypeFromString(putNotification))
- h.SetHandler(cp.handlePut)
- handlers = append(handlers, h)
-
- // container delete
- h.SetType(event.TypeFromString(deleteNotification))
- h.SetHandler(cp.handleDelete)
- handlers = append(handlers, h)
-
- // set eACL
- h.SetType(event.TypeFromString(setEACLNotification))
- h.SetHandler(cp.handleSetEACL)
- handlers = append(handlers, h)
-
- return handlers
+ return nil
}
// ListenerNotaryParsers for the 'event.Listener' notary event producer.
@@ -197,11 +149,6 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
p.SetParser(containerEvent.ParseDeleteNotary)
pp = append(pp, p)
- // set EACL
- p.SetRequestType(containerEvent.SetEACLNotaryEvent)
- p.SetParser(containerEvent.ParseSetEACLNotary)
- pp = append(pp, p)
-
return pp
}
@@ -230,15 +177,5 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
h.SetHandler(cp.handleDelete)
hh = append(hh, h)
- // set eACL
- h.SetRequestType(containerEvent.SetEACLNotaryEvent)
- h.SetHandler(cp.handleSetEACL)
- hh = append(hh, h)
-
return hh
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (cp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
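
The processor now depends on the narrow ContClient, MorphClient and FrostFSIDClient interfaces instead of the concrete morph clients, which is what lets the in-memory fakes in handlers_test.go stand in for the chain. A self-contained sketch of the pattern, with hypothetical names and a string standing in for *transaction.Transaction:

```go
package main

import "fmt"

// MorphClient is a consumer-side interface: only the single method the
// processor actually calls, so tests can substitute an in-memory fake.
type MorphClient interface {
	NotarySignAndInvokeTX(mainTx string) error
}

type Processor struct{ morph MorphClient }

func (p *Processor) approve(tx string) error {
	return p.morph.NotarySignAndInvokeTX(tx)
}

// fakeMorph records invocations instead of touching the chain, in the same
// spirit as testMorphClient in the handler tests above.
type fakeMorph struct{ sent []string }

func (f *fakeMorph) NotarySignAndInvokeTX(tx string) error {
	f.sent = append(f.sent, tx)
	return nil
}

func main() {
	fake := &fakeMorph{}
	p := &Processor{morph: fake}
	_ = p.approve("main-tx")
	fmt.Println(fake.sent) // [main-tx]
}
```
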
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
index bc0dbec7f4..936de2e774 100644
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ b/pkg/innerring/processors/frostfs/handlers.go
@@ -1,107 +1,91 @@
package frostfs
import (
+ "bytes"
+ "context"
"encoding/hex"
+ "slices"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.uber.org/zap"
)
-func (np *Processor) handleDeposit(ev event.Event) {
+func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
- np.log.Info("notification",
+ depositIDBin := bytes.Clone(deposit.ID())
+ slices.Reverse(depositIDBin)
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "deposit"),
- zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID()))))
+ zap.String("id", hex.EncodeToString(depositIDBin)))
// send event to the worker pool
- err := np.pool.Submit(func() { np.processDeposit(&deposit) })
+ err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
+ return np.processDeposit(ctx, deposit)
+ })
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleWithdraw(ev event.Event) {
+func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
- np.log.Info("notification",
+ withdrawBin := bytes.Clone(withdraw.ID())
+ slices.Reverse(withdrawBin)
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "withdraw"),
- zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID()))))
+ zap.String("id", hex.EncodeToString(withdrawBin)))
// send event to the worker pool
- err := np.pool.Submit(func() { np.processWithdraw(&withdraw) })
+ err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
+ return np.processWithdraw(ctx, withdraw)
+ })
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCheque(ev event.Event) {
+func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
- np.log.Info("notification",
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
// send event to the worker pool
- err := np.pool.Submit(func() { np.processCheque(&cheque) })
+ err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
+ return np.processCheque(ctx, cheque)
+ })
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleConfig(ev event.Event) {
+func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
cfg := ev.(frostfsEvent.Config)
- np.log.Info("notification",
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
// send event to the worker pool
- err := np.pool.Submit(func() { np.processConfig(&cfg) })
+ err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
+ return np.processConfig(ctx, cfg)
+ })
if err != nil {
// here the system can be moved into a controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
- zap.Int("capacity", np.pool.Cap()))
- }
-}
-
-func (np *Processor) handleBind(ev event.Event) {
- e := ev.(frostfsEvent.Bind)
- np.log.Info("notification",
- zap.String("type", "bind"),
- )
-
- // send event to the worker pool
-
- err := np.pool.Submit(func() { np.processBind(e) })
- if err != nil {
- // there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
- zap.Int("capacity", np.pool.Cap()))
- }
-}
-
-func (np *Processor) handleUnbind(ev event.Event) {
- e := ev.(frostfsEvent.Unbind)
- np.log.Info("notification",
- zap.String("type", "unbind"),
- )
-
- // send event to the worker pool
-
- err := np.pool.Submit(func() { np.processBind(e) })
- if err != nil {
- // there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
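
Every handler now routes through processors.SubmitEvent with a bool-returning worker, so per-event duration and success can be recorded. Its implementation is not part of this hunk; the following is a plausible sketch inferred from the call sites, and the Register interface with its AddEvent method is an assumed stand-in for the real metrics type:

```go
package processors

import (
	"time"

	"github.com/panjf2000/ants/v2"
)

// Register stands in for the inner ring metrics interface; its real method
// set is not visible in this patch, so AddEvent is an assumed name.
type Register interface {
	AddEvent(d time.Duration, typ string, success bool)
}

// SubmitEvent wraps a bool-returning handler so that every processed event
// reports its duration and success flag to the metrics register.
func SubmitEvent(pool *ants.Pool, m Register, typ string, handler func() bool) error {
	return pool.Submit(func() {
		start := time.Now()
		success := handler()
		m.AddEvent(time.Since(start), typ, success)
	})
}
```
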
diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go
new file mode 100644
index 0000000000..72310f6f9e
--- /dev/null
+++ b/pkg/innerring/processors/frostfs/handlers_test.go
@@ -0,0 +1,289 @@
+package frostfs
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
+ nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHandleDeposit(t *testing.T) {
+ t.Parallel()
+ es := &testEpochState{
+ epochCounter: 100,
+ }
+ b := &testBalanceClient{}
+ m := &testMorphClient{
+ balance: 150,
+ }
+ proc, err := newTestProc(t, func(p *Params) {
+ p.EpochState = es
+ p.BalanceClient = b
+ p.MorphClient = m
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ ev := frostfsEvent.Deposit{
+ IDValue: []byte{1, 2, 3, 4, 5},
+ FromValue: util.Uint160{100},
+ ToValue: util.Uint160{200},
+ AmountValue: 1000,
+ }
+
+ proc.handleDeposit(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ var expMint balance.MintPrm
+ expMint.SetAmount(ev.AmountValue)
+ expMint.SetID(ev.IDValue)
+ expMint.SetTo(ev.ToValue)
+
+ require.EqualValues(t, []balance.MintPrm{expMint}, b.mint, "invalid mint value")
+ require.EqualValues(t, []transferGas{
+ {
+ receiver: ev.ToValue,
+ amount: fixedn.Fixed8(50),
+ },
+ }, m.transferGas, "invalid transfer gas")
+
+ es.epochCounter = 109
+
+ proc.handleDeposit(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ expMint.SetAmount(ev.AmountValue)
+ expMint.SetID(ev.IDValue)
+ expMint.SetTo(ev.ToValue)
+
+ require.EqualValues(t, []balance.MintPrm{expMint, expMint}, b.mint, "invalid mint value")
+ require.EqualValues(t, []transferGas{
+ {
+ receiver: ev.ToValue,
+ amount: fixedn.Fixed8(50),
+ },
+ }, m.transferGas, "invalid transfer gas")
+}
+
+func TestHandleWithdraw(t *testing.T) {
+ t.Parallel()
+ es := &testEpochState{
+ epochCounter: 100,
+ }
+ b := &testBalanceClient{}
+ m := &testMorphClient{
+ balance: 150,
+ }
+ proc, err := newTestProc(t, func(p *Params) {
+ p.EpochState = es
+ p.BalanceClient = b
+ p.MorphClient = m
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ ev := frostfsEvent.Withdraw{
+ IDValue: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+ UserValue: util.Uint160{100},
+ AmountValue: 1000,
+ }
+
+ proc.handleWithdraw(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ lock, err := util.Uint160DecodeBytesBE(ev.ID()[:util.Uint160Size])
+ require.NoError(t, err, "failed to decode ID")
+ var expLock balance.LockPrm
+ expLock.SetAmount(ev.AmountValue)
+ expLock.SetID(ev.IDValue)
+ expLock.SetDueEpoch(int64(es.epochCounter) + int64(lockAccountLifetime))
+ expLock.SetLock(lock)
+ expLock.SetUser(ev.UserValue)
+
+ require.EqualValues(t, []balance.LockPrm{expLock}, b.lock, "invalid lock value")
+}
+
+func TestHandleCheque(t *testing.T) {
+ t.Parallel()
+ es := &testEpochState{
+ epochCounter: 100,
+ }
+ b := &testBalanceClient{}
+ m := &testMorphClient{
+ balance: 150,
+ }
+ proc, err := newTestProc(t, func(p *Params) {
+ p.BalanceClient = b
+ p.MorphClient = m
+ p.EpochState = es
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ ev := frostfsEvent.Cheque{
+ IDValue: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+ UserValue: util.Uint160{100},
+ AmountValue: 1000,
+ LockValue: util.Uint160{200},
+ }
+
+ proc.handleCheque(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ var expBurn balance.BurnPrm
+ expBurn.SetAmount(ev.AmountValue)
+ expBurn.SetID(ev.IDValue)
+ expBurn.SetTo(util.Uint160{200})
+
+ require.EqualValues(t, []balance.BurnPrm{expBurn}, b.burn, "invalid burn value")
+}
+
+func TestHandleConfig(t *testing.T) {
+ t.Parallel()
+ es := &testEpochState{
+ epochCounter: 100,
+ }
+ nm := &testNetmapClient{}
+ m := &testMorphClient{
+ balance: 150,
+ }
+ proc, err := newTestProc(t, func(p *Params) {
+ p.NetmapClient = nm
+ p.MorphClient = m
+ p.EpochState = es
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ ev := frostfsEvent.Config{
+ IDValue: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+ KeyValue: []byte{1, 2, 3, 4, 5},
+ ValueValue: []byte{6, 7, 8, 9, 0},
+ TxHashValue: util.Uint256{100},
+ }
+
+ proc.handleConfig(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ var expConfig nmClient.SetConfigPrm
+ expConfig.SetHash(ev.TxHashValue)
+ expConfig.SetID(ev.IDValue)
+ expConfig.SetKey(ev.KeyValue)
+ expConfig.SetValue(ev.ValueValue)
+
+ require.EqualValues(t, []nmClient.SetConfigPrm{expConfig}, nm.config, "invalid config value")
+}
+
+func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
+ p := &Params{
+ Log: test.NewLogger(t),
+ PoolSize: 1,
+ FrostFSContract: util.Uint160{0},
+ BalanceClient: &testBalanceClient{},
+ NetmapClient: &testNetmapClient{},
+ MorphClient: &testMorphClient{},
+ EpochState: &testEpochState{},
+ AlphabetState: &testAlphabetState{isAlphabet: true},
+ Converter: &testPrecisionConverter{},
+ MintEmitCacheSize: 100,
+ MintEmitThreshold: 10,
+ MintEmitValue: fixedn.Fixed8(50),
+ GasBalanceThreshold: 50,
+ }
+
+ nonDefault(p)
+
+ return New(p)
+}
+
+type testEpochState struct {
+ epochCounter uint64
+}
+
+func (s *testEpochState) EpochCounter() uint64 {
+ return s.epochCounter
+}
+
+type testAlphabetState struct {
+ isAlphabet bool
+}
+
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+ return s.isAlphabet
+}
+
+type testPrecisionConverter struct{}
+
+func (c *testPrecisionConverter) ToBalancePrecision(v int64) int64 {
+ return v
+}
+
+type testBalanceClient struct {
+ mint []balance.MintPrm
+ lock []balance.LockPrm
+ burn []balance.BurnPrm
+}
+
+func (c *testBalanceClient) Mint(_ context.Context, p balance.MintPrm) error {
+ c.mint = append(c.mint, p)
+ return nil
+}
+
+func (c *testBalanceClient) Lock(_ context.Context, p balance.LockPrm) error {
+ c.lock = append(c.lock, p)
+ return nil
+}
+
+func (c *testBalanceClient) Burn(_ context.Context, p balance.BurnPrm) error {
+ c.burn = append(c.burn, p)
+ return nil
+}
+
+type testNetmapClient struct {
+ config []nmClient.SetConfigPrm
+}
+
+func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
+ c.config = append(c.config, p)
+ return nil
+}
+
+type transferGas struct {
+ receiver util.Uint160
+ amount fixedn.Fixed8
+}
+
+type testMorphClient struct {
+ balance int64
+ transferGas []transferGas
+}
+
+func (c *testMorphClient) GasBalance() (res int64, err error) {
+ return c.balance, nil
+}
+
+func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
+ c.transferGas = append(c.transferGas, transferGas{
+ receiver: receiver,
+ amount: amount,
+ })
+ return nil
+}
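
The tests above synchronize with the nonblocking ants pool by busy-waiting on Running(). A bounded variant avoids hanging the test run if a worker ever deadlocks; this is an illustrative helper only, not part of the diff:

```go
package frostfs

import (
	"testing"
	"time"

	"github.com/panjf2000/ants/v2"
)

// waitForPool polls until the pool drains or the deadline passes, failing
// the test instead of hanging if a worker never finishes.
func waitForPool(t *testing.T, pool *ants.Pool, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for pool.Running() > 0 {
		if time.Now().After(deadline) {
			t.Fatal("worker pool did not drain in time")
		}
		time.Sleep(10 * time.Millisecond)
	}
}
```
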
diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go
index b28efaa339..d10eb96603 100644
--- a/pkg/innerring/processors/frostfs/process_assets.go
+++ b/pkg/innerring/processors/frostfs/process_assets.go
@@ -1,6 +1,9 @@
package frostfs
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -14,10 +17,10 @@ const (
// Process deposit event by invoking a balance contract and sending native
// gas in the sidechain.
-func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore deposit")
- return
+func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit)
+ return true
}
prm := balance.MintPrm{}
@@ -27,9 +30,9 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
prm.SetID(deposit.ID())
// send transferX to a balance contract
- err := np.balanceClient.Mint(prm)
+ err := np.balanceClient.Mint(ctx, prm)
if err != nil {
- np.log.Error("can't transfer assets to balance contract", zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@@ -43,53 +46,55 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
- np.log.Warn("double mint emission declined",
- zap.String("receiver", receiver.String()),
+ np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined,
+ zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
- return
+ return false
}
// get gas balance of the node
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
- np.log.Error("can't get gas balance of the node", zap.Error(err))
- return
+ np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
+ return false
}
if balance < np.gasBalanceThreshold {
- np.log.Warn("gas balance threshold has been reached",
+ np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
- return
+ return false
}
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
- np.log.Error("can't transfer native gas to receiver",
- zap.String("error", err.Error()))
+ np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
+ zap.Error(err))
- return
+ return false
}
np.mintEmitCache.Add(receiver.String(), curEpoch)
+
+ return true
}
// Process withdraw event by locking assets in the balance account.
-func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore withdraw")
- return
+func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw)
+ return true
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
- np.log.Error("can't create lock account", zap.Error(err))
- return
+ np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err))
+ return false
}
curEpoch := np.epochState.EpochCounter()
@@ -102,18 +107,21 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount()))
prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime))
- err = np.balanceClient.Lock(prm)
+ err = np.balanceClient.Lock(ctx, prm)
if err != nil {
- np.log.Error("can't lock assets for withdraw", zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
+ return false
}
+
+ return true
}
// Process cheque event by transferring assets from the lock account back to
// the reserve account.
-func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore cheque")
- return
+func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque)
+ return true
}
prm := balance.BurnPrm{}
@@ -122,8 +130,11 @@ func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {
prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount()))
prm.SetID(cheque.ID())
- err := np.balanceClient.Burn(prm)
+ err := np.balanceClient.Burn(ctx, prm)
if err != nil {
- np.log.Error("can't transfer assets to fed contract", zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
+ return false
}
+
+ return true
}
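
processDeposit throttles native-gas emission per receiver via the LRU cache: at most one emission per mintEmitThreshold epochs. A self-contained sketch of that throttle, assuming the generic hashicorp/golang-lru/v2 cache the diff declares; names are illustrative, and unlike this sketch the real code also checks the node's own gas balance and updates the cache only after a successful transfer:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

// shouldEmitGas reproduces the throttle in processDeposit: a receiver gets
// at most one native-gas emission per `threshold` epochs, tracked in an
// LRU cache keyed by receiver address.
func shouldEmitGas(cache *lru.Cache[string, uint64], receiver string, curEpoch, threshold uint64) bool {
	if last, ok := cache.Get(receiver); ok && last+threshold >= curEpoch {
		return false // double mint emission declined
	}
	cache.Add(receiver, curEpoch)
	return true
}

func main() {
	cache, _ := lru.New[string, uint64](100)
	fmt.Println(shouldEmitGas(cache, "NReceiver", 100, 10)) // true: first emission
	fmt.Println(shouldEmitGas(cache, "NReceiver", 105, 10)) // false: within threshold
	fmt.Println(shouldEmitGas(cache, "NReceiver", 111, 10)) // true: threshold passed
}
```
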
diff --git a/pkg/innerring/processors/frostfs/process_bind.go b/pkg/innerring/processors/frostfs/process_bind.go
deleted file mode 100644
index 0abce58279..0000000000
--- a/pkg/innerring/processors/frostfs/process_bind.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package frostfs
-
-import (
- "crypto/elliptic"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "go.uber.org/zap"
-)
-
-type bindCommon interface {
- User() []byte
- Keys() [][]byte
- TxHash() util.Uint256
-}
-
-func (np *Processor) processBind(e bindCommon) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore bind")
- return
- }
-
- c := &bindCommonContext{
- bindCommon: e,
- }
-
- _, c.bind = e.(frostfs.Bind)
-
- err := np.checkBindCommon(c)
- if err != nil {
- np.log.Error("invalid manage key event",
- zap.Bool("bind", c.bind),
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- np.approveBindCommon(c)
-}
-
-type bindCommonContext struct {
- bindCommon
-
- bind bool
-
- scriptHash util.Uint160
-}
-
-func (np *Processor) checkBindCommon(e *bindCommonContext) error {
- var err error
-
- e.scriptHash, err = util.Uint160DecodeBytesBE(e.User())
- if err != nil {
- return err
- }
-
- curve := elliptic.P256()
-
- for _, key := range e.Keys() {
- _, err = keys.NewPublicKeyFromBytes(key, curve)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (np *Processor) approveBindCommon(e *bindCommonContext) {
- // calculate wallet address
- scriptHash := e.User()
-
- u160, err := util.Uint160DecodeBytesBE(scriptHash)
- if err != nil {
- np.log.Error("could not decode script hash from bytes",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- var id user.ID
- id.SetScriptHash(u160)
-
- prm := frostfsid.CommonBindPrm{}
- prm.SetOwnerID(id.WalletBytes())
- prm.SetKeys(e.Keys())
- prm.SetHash(e.bindCommon.TxHash())
-
- var typ string
- if e.bind {
- typ = "bind"
- err = np.frostfsIDClient.AddKeys(prm)
- } else {
- typ = "unbind"
- err = np.frostfsIDClient.RemoveKeys(prm)
- }
-
- if err != nil {
- np.log.Error(fmt.Sprintf("could not approve %s", typ),
- zap.String("error", err.Error()))
- }
-}
diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go
index ecc90332f9..dc579f6bb5 100644
--- a/pkg/innerring/processors/frostfs/process_config.go
+++ b/pkg/innerring/processors/frostfs/process_config.go
@@ -1,6 +1,9 @@
package frostfs
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"go.uber.org/zap"
@@ -8,10 +11,10 @@ import (
// Process config event by setting configuration value from the mainchain in
// the sidechain.
-func (np *Processor) processConfig(config *frostfsEvent.Config) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore config")
- return
+func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig)
+ return true
}
prm := nmClient.SetConfigPrm{}
@@ -21,8 +24,11 @@ func (np *Processor) processConfig(config *frostfsEvent.Config) {
prm.SetValue(config.Value())
prm.SetHash(config.TxHash())
- err := np.netmapClient.SetConfig(prm)
+ err := np.netmapClient.SetConfig(ctx, prm)
if err != nil {
- np.log.Error("can't relay set config event", zap.Error(err))
+ np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
+ return false
}
+
+ return true
}
diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go
index e9504cdb4f..9d3bf65cd3 100644
--- a/pkg/innerring/processors/frostfs/processor.go
+++ b/pkg/innerring/processors/frostfs/processor.go
@@ -1,13 +1,13 @@
package frostfs
import (
+ "context"
"errors"
"fmt"
"sync"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -16,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
@@ -27,7 +26,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@@ -35,35 +34,49 @@ type (
ToBalancePrecision(int64) int64
}
+ BalanceClient interface {
+ Mint(ctx context.Context, p balance.MintPrm) error
+ Lock(ctx context.Context, p balance.LockPrm) error
+ Burn(ctx context.Context, p balance.BurnPrm) error
+ }
+
+ NetmapClient interface {
+ SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error
+ }
+
+ MorphClient interface {
+ GasBalance() (res int64, err error)
+ TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
+ }
+
// Processor of events produced by frostfs contract in main net.
Processor struct {
log *logger.Logger
+ metrics metrics.Register
pool *ants.Pool
frostfsContract util.Uint160
- balanceClient *balance.Client
- netmapClient *nmClient.Client
- morphClient *client.Client
+ balanceClient BalanceClient
+ netmapClient NetmapClient
+ morphClient MorphClient
epochState EpochState
alphabetState AlphabetState
converter PrecisionConverter
- mintEmitLock *sync.Mutex
+ mintEmitLock sync.Mutex
mintEmitCache *lru.Cache[string, uint64]
mintEmitThreshold uint64
mintEmitValue fixedn.Fixed8
gasBalanceThreshold int64
-
- frostfsIDClient *frostfsid.Client
}
// Params of the processor constructor.
Params struct {
Log *logger.Logger
+ Metrics metrics.Register
PoolSize int
FrostFSContract util.Uint160
- FrostFSIDClient *frostfsid.Client
- BalanceClient *balance.Client
- NetmapClient *nmClient.Client
- MorphClient *client.Client
+ BalanceClient BalanceClient
+ NetmapClient NetmapClient
+ MorphClient MorphClient
EpochState EpochState
AlphabetState AlphabetState
Converter PrecisionConverter
@@ -79,8 +92,6 @@ const (
withdrawNotification = "Withdraw"
chequeNotification = "Cheque"
configNotification = "SetConfig"
- bindNotification = "Bind"
- unbindNotification = "Unbind"
)
// New creates frostfs mainnet contract processor instance.
@@ -98,8 +109,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
- p.Log.Debug("frostfs worker pool", zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
@@ -110,8 +119,14 @@ func New(p *Params) (*Processor, error) {
return nil, fmt.Errorf("ir/frostfs: can't create LRU cache for gas emission: %w", err)
}
+ metricsRegister := p.Metrics
+ if metricsRegister == nil {
+ metricsRegister = metrics.DefaultRegister{}
+ }
+
return &Processor{
log: p.Log,
+ metrics: metricsRegister,
pool: pool,
frostfsContract: p.FrostFSContract,
balanceClient: p.BalanceClient,
@@ -120,100 +135,41 @@ func New(p *Params) (*Processor, error) {
epochState: p.EpochState,
alphabetState: p.AlphabetState,
converter: p.Converter,
- mintEmitLock: new(sync.Mutex),
mintEmitCache: lruCache,
mintEmitThreshold: p.MintEmitThreshold,
mintEmitValue: p.MintEmitValue,
gasBalanceThreshold: p.GasBalanceThreshold,
-
- frostfsIDClient: p.FrostFSIDClient,
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- var (
- parsers = make([]event.NotificationParserInfo, 0, 6)
-
- p event.NotificationParserInfo
- )
-
- p.SetScriptHash(np.frostfsContract)
-
- // deposit event
- p.SetType(event.TypeFromString(depositNotification))
- p.SetParser(frostfsEvent.ParseDeposit)
- parsers = append(parsers, p)
-
- // withdraw event
- p.SetType(event.TypeFromString(withdrawNotification))
- p.SetParser(frostfsEvent.ParseWithdraw)
- parsers = append(parsers, p)
-
- // cheque event
- p.SetType(event.TypeFromString(chequeNotification))
- p.SetParser(frostfsEvent.ParseCheque)
- parsers = append(parsers, p)
-
- // config event
- p.SetType(event.TypeFromString(configNotification))
- p.SetParser(frostfsEvent.ParseConfig)
- parsers = append(parsers, p)
-
- // bind event
- p.SetType(event.TypeFromString(bindNotification))
- p.SetParser(frostfsEvent.ParseBind)
- parsers = append(parsers, p)
-
- // unbind event
- p.SetType(event.TypeFromString(unbindNotification))
- p.SetParser(frostfsEvent.ParseUnbind)
- parsers = append(parsers, p)
-
- return parsers
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- var (
- handlers = make([]event.NotificationHandlerInfo, 0, 6)
-
- h event.NotificationHandlerInfo
- )
-
- h.SetScriptHash(np.frostfsContract)
-
- // deposit handler
- h.SetType(event.TypeFromString(depositNotification))
- h.SetHandler(np.handleDeposit)
- handlers = append(handlers, h)
-
- // withdraw handler
- h.SetType(event.TypeFromString(withdrawNotification))
- h.SetHandler(np.handleWithdraw)
- handlers = append(handlers, h)
-
- // cheque handler
- h.SetType(event.TypeFromString(chequeNotification))
- h.SetHandler(np.handleCheque)
- handlers = append(handlers, h)
-
- // config handler
- h.SetType(event.TypeFromString(configNotification))
- h.SetHandler(np.handleConfig)
- handlers = append(handlers, h)
-
- // bind handler
- h.SetType(event.TypeFromString(bindNotification))
- h.SetHandler(np.handleBind)
- handlers = append(handlers, h)
-
- // unbind handler
- h.SetType(event.TypeFromString(unbindNotification))
- h.SetHandler(np.handleUnbind)
- handlers = append(handlers, h)
-
- return handlers
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(depositNotification),
+ Parser: frostfsEvent.ParseDeposit,
+ Handlers: []event.Handler{np.handleDeposit},
+ },
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(withdrawNotification),
+ Parser: frostfsEvent.ParseWithdraw,
+ Handlers: []event.Handler{np.handleWithdraw},
+ },
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(chequeNotification),
+ Parser: frostfsEvent.ParseCheque,
+ Handlers: []event.Handler{np.handleCheque},
+ },
+ {
+ Contract: np.frostfsContract,
+ Type: event.TypeFromString(configNotification),
+ Parser: frostfsEvent.ParseConfig,
+ Handlers: []event.Handler{np.handleConfig},
+ },
+ }
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
@@ -225,8 +181,3 @@ func (np *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (np *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
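
The imperative Set* builder calls are replaced with a declarative table of NotificationHandlerInfo literals. The same table-driven style in a self-contained sketch; HandlerInfo here only mirrors the shape used above, with the contract and parser fields omitted to keep it runnable:

```go
package main

import "fmt"

type Handler func(event string)

// HandlerInfo pairs an event type with its handlers, replacing the old
// pattern of repeated Set* calls on a shared mutable struct.
type HandlerInfo struct {
	Type     string
	Handlers []Handler
}

func notificationHandlers() []HandlerInfo {
	return []HandlerInfo{
		{Type: "Deposit", Handlers: []Handler{func(e string) { fmt.Println("handle", e) }}},
		{Type: "Withdraw", Handlers: []Handler{func(e string) { fmt.Println("handle", e) }}},
	}
}

func main() {
	for _, hi := range notificationHandlers() {
		for _, h := range hi.Handlers {
			h(hi.Type)
		}
	}
}
```
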
diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go
index bfa88d3f0e..7e8ab629d0 100644
--- a/pkg/innerring/processors/governance/handlers.go
+++ b/pkg/innerring/processors/governance/handlers.go
@@ -1,6 +1,10 @@
package governance
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"github.com/nspcc-dev/neo-go/pkg/core/native"
@@ -9,7 +13,7 @@ import (
"go.uber.org/zap"
)
-func (gp *Processor) HandleAlphabetSync(e event.Event) {
+func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
var (
typ string
hash util.Uint256
@@ -30,14 +34,16 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
return
}
- gp.log.Info("new event", zap.String("type", typ))
+ gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ))
// send event to the worker pool
- err := gp.pool.Submit(func() { gp.processAlphabetSync(hash) })
+ err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool {
+ return gp.processAlphabetSync(ctx, hash)
+ })
if err != nil {
// here the system can be moved into a controlled degradation stage
- gp.log.Warn("governance worker pool drained",
+ gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained,
zap.Int("capacity", gp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
new file mode 100644
index 0000000000..864c5da673
--- /dev/null
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -0,0 +1,284 @@
+package governance
+
+import (
+ "context"
+ "encoding/binary"
+ "sort"
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHandleAlphabetSyncEvent(t *testing.T) {
+ t.Parallel()
+ testKeys := generateTestKeys(t)
+
+ es := &testEpochState{
+ epoch: 100,
+ }
+ as := &testAlphabetState{
+ isAlphabet: true,
+ }
+ v := &testVoter{}
+ irf := &testIRFetcher{
+ publicKeys: testKeys.sidechainKeys,
+ }
+ m := &testMorphClient{
+ committeeKeys: testKeys.sidechainKeys,
+ }
+ mn := &testMainnetClient{
+ alphabetKeys: testKeys.mainnetKeys,
+ }
+ f := &testFrostFSClient{}
+
+ proc, err := New(
+ &Params{
+ Log: test.NewLogger(t),
+ EpochState: es,
+ AlphabetState: as,
+ Voter: v,
+ IRFetcher: irf,
+ MorphClient: m,
+ MainnetClient: mn,
+ FrostFSClient: f,
+ },
+ )
+
+ require.NoError(t, err, "failed to create processor")
+
+ ev := Sync{
+ txHash: util.Uint256{100},
+ }
+
+ proc.HandleAlphabetSync(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.EqualValues(t, []VoteValidatorPrm{
+ {
+ Validators: testKeys.newAlphabetExp,
+ Hash: &ev.txHash,
+ },
+ }, v.votes, "invalid vote calls")
+
+ var expAlphabetUpdate client.UpdateAlphabetListPrm
+ expAlphabetUpdate.SetHash(ev.txHash)
+ expAlphabetUpdate.SetList(testKeys.newInnerRingExp)
+ require.EqualValues(t, []client.UpdateAlphabetListPrm{expAlphabetUpdate}, m.alphabetUpdates, "invalid alphabet updates")
+
+ var expNotaryUpdate client.UpdateNotaryListPrm
+ expNotaryUpdate.SetHash(ev.txHash)
+ expNotaryUpdate.SetList(testKeys.newAlphabetExp)
+ require.EqualValues(t, []client.UpdateNotaryListPrm{expNotaryUpdate}, m.notaryUpdates, "invalid notary list updates")
+
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, es.epoch)
+
+ id := append([]byte(alphabetUpdateIDPrefix), buf...)
+ var expFrostFSAlphabetUpd frostfscontract.AlphabetUpdatePrm
+ expFrostFSAlphabetUpd.SetID(id)
+ expFrostFSAlphabetUpd.SetPubs(testKeys.newAlphabetExp)
+
+ require.EqualValues(t, []frostfscontract.AlphabetUpdatePrm{expFrostFSAlphabetUpd}, f.updates, "invalid FrostFS alphabet updates")
+}
+
+func TestHandleAlphabetDesignateEvent(t *testing.T) {
+ t.Parallel()
+ testKeys := generateTestKeys(t)
+
+ es := &testEpochState{
+ epoch: 100,
+ }
+ as := &testAlphabetState{
+ isAlphabet: true,
+ }
+ v := &testVoter{}
+ irf := &testIRFetcher{
+ publicKeys: testKeys.sidechainKeys,
+ }
+ m := &testMorphClient{
+ committeeKeys: testKeys.sidechainKeys,
+ }
+ mn := &testMainnetClient{
+ alphabetKeys: testKeys.mainnetKeys,
+ }
+ f := &testFrostFSClient{}
+
+ proc, err := New(
+ &Params{
+ Log: test.NewLogger(t),
+ EpochState: es,
+ AlphabetState: as,
+ Voter: v,
+ IRFetcher: irf,
+ MorphClient: m,
+ MainnetClient: mn,
+ FrostFSClient: f,
+ },
+ )
+
+ require.NoError(t, err, "failed to create processor")
+
+ ev := rolemanagement.Designate{
+ TxHash: util.Uint256{100},
+ Role: noderoles.NeoFSAlphabet,
+ }
+
+ proc.HandleAlphabetSync(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.EqualValues(t, []VoteValidatorPrm{
+ {
+ Validators: testKeys.newAlphabetExp,
+ Hash: &ev.TxHash,
+ },
+ }, v.votes, "invalid vote calls")
+
+ var alphabetUpdExp client.UpdateAlphabetListPrm
+ alphabetUpdExp.SetList(testKeys.newInnerRingExp)
+ alphabetUpdExp.SetHash(ev.TxHash)
+ require.EqualValues(t, []client.UpdateAlphabetListPrm{alphabetUpdExp}, m.alphabetUpdates, "invalid alphabet updates")
+
+ var expNotaryUpdate client.UpdateNotaryListPrm
+ expNotaryUpdate.SetList(testKeys.newAlphabetExp)
+ expNotaryUpdate.SetHash(ev.TxHash)
+ require.EqualValues(t, []client.UpdateNotaryListPrm{expNotaryUpdate}, m.notaryUpdates, "invalid notary list updates")
+
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, es.epoch)
+
+ id := append([]byte(alphabetUpdateIDPrefix), buf...)
+ var expFrostFSAlphabetUpd frostfscontract.AlphabetUpdatePrm
+ expFrostFSAlphabetUpd.SetID(id)
+ expFrostFSAlphabetUpd.SetPubs(testKeys.newAlphabetExp)
+
+ require.EqualValues(t, []frostfscontract.AlphabetUpdatePrm{expFrostFSAlphabetUpd}, f.updates, "invalid FrostFS alphabet updates")
+}
+
+type testKeys struct {
+ sidechainKeys keys.PublicKeys
+ mainnetKeys keys.PublicKeys
+ newAlphabetExp keys.PublicKeys
+ newInnerRingExp keys.PublicKeys
+}
+
+func generateTestKeys(t *testing.T) testKeys {
+ for {
+ var result testKeys
+
+ for range 4 {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err, "failed to create private key")
+ result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey())
+ }
+
+ result.mainnetKeys = append(result.mainnetKeys, result.sidechainKeys...)
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err, "failed to create private key")
+ result.mainnetKeys = append(result.mainnetKeys, pk.PublicKey())
+
+ result.newAlphabetExp, err = newAlphabetList(result.sidechainKeys, result.mainnetKeys)
+ require.NoError(t, err, "failed to create expected new alphabet")
+
+ if len(result.newAlphabetExp) == 0 {
+ continue // can happen because of randomness and sorting
+ }
+
+ var irKeys keys.PublicKeys
+ irKeys = append(irKeys, result.sidechainKeys...)
+ result.newInnerRingExp, err = updateInnerRing(irKeys, result.sidechainKeys, result.newAlphabetExp)
+ require.NoError(t, err, "failed to create expected new IR")
+ sort.Sort(result.newInnerRingExp)
+
+ return result
+ }
+}
+
+type testEpochState struct {
+ epoch uint64
+}
+
+func (s *testEpochState) EpochCounter() uint64 {
+ return s.epoch
+}
+
+type testAlphabetState struct {
+ isAlphabet bool
+}
+
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+ return s.isAlphabet
+}
+
+type testVoter struct {
+ votes []VoteValidatorPrm
+}
+
+func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error {
+ v.votes = append(v.votes, prm)
+ return nil
+}
+
+type testIRFetcher struct {
+ publicKeys keys.PublicKeys
+}
+
+func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+ return f.publicKeys, nil
+}
+
+type testMorphClient struct {
+ committeeKeys keys.PublicKeys
+
+ alphabetUpdates []client.UpdateAlphabetListPrm
+ notaryUpdates []client.UpdateNotaryListPrm
+}
+
+func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) {
+ return c.committeeKeys, nil
+}
+
+func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error {
+ c.alphabetUpdates = append(c.alphabetUpdates, prm)
+ return nil
+}
+
+func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error {
+ c.notaryUpdates = append(c.notaryUpdates, prm)
+ return nil
+}
+
+type testMainnetClient struct {
+ alphabetKeys keys.PublicKeys
+ designateHash util.Uint160
+}
+
+func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) {
+ return c.alphabetKeys, nil
+}
+
+func (c *testMainnetClient) GetDesignateHash() util.Uint160 {
+ return c.designateHash
+}
+
+type testFrostFSClient struct {
+ updates []frostfscontract.AlphabetUpdatePrm
+}
+
+func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error {
+ c.updates = append(c.updates, p)
+ return nil
+}
diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go
index e0121ab625..4ecebf05b4 100644
--- a/pkg/innerring/processors/governance/list_test.go
+++ b/pkg/innerring/processors/governance/list_test.go
@@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) {
}
ln := len(rounds)
- for i := 0; i < ln; i++ {
+ for i := range ln {
list, err = newAlphabetList(list, exp)
require.NoError(t, err)
require.True(t, equalPublicKeyLists(list, rounds[i]))
@@ -60,8 +60,7 @@ func TestNewAlphabetList(t *testing.T) {
orig := keys.PublicKeys{k[1], k[2], k[3], k[4]}
main := keys.PublicKeys{k[1], k[2], k[5], k[4]}
- exp := make(keys.PublicKeys, len(main))
- copy(exp, main)
+ exp := main.Copy()
sort.Sort(exp)
got, err := newAlphabetList(orig, main)
@@ -132,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) {
func generateKeys(n int) (keys.PublicKeys, error) {
pubKeys := make(keys.PublicKeys, 0, n)
- for i := 0; i < n; i++ {
+ for range n {
privKey, err := keys.NewPrivateKey()
if err != nil {
return nil, err
diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go
index 913ad3d783..6e22abb3c0 100644
--- a/pkg/innerring/processors/governance/process_update.go
+++ b/pkg/innerring/processors/governance/process_update.go
@@ -1,14 +1,15 @@
package governance
import (
+ "context"
"encoding/binary"
"encoding/hex"
"sort"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
@@ -18,39 +19,39 @@ const (
alphabetUpdateIDPrefix = "AlphabetUpdate"
)
-func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
- if !gp.alphabetState.IsAlphabet() {
- gp.log.Info("non alphabet mode, ignore alphabet sync")
- return
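+// processAlphabetSync reports whether the sync completed without errors; the
+// handler presumably feeds this result into the processor metrics via
+// processors.SubmitEvent, as the netmap handlers in this change do.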
+func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool {
+ if !gp.alphabetState.IsAlphabet(ctx) {
+ gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
+ return true
}
- mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
+ mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx)
if err != nil {
- gp.log.Error("can't fetch alphabet list from main net",
- zap.String("error", err.Error()))
- return
+ gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
+ zap.Error(err))
+ return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
- gp.log.Error("can't fetch alphabet list from side chain",
- zap.String("error", err.Error()))
- return
+ gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
+ zap.Error(err))
+ return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
- gp.log.Error("can't merge alphabet lists from main net and side chain",
- zap.String("error", err.Error()))
- return
+ gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
+ zap.Error(err))
+ return false
}
if newAlphabet == nil {
- gp.log.Info("no governance update, alphabet list has not been changed")
- return
+ gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
+ return true
}
- gp.log.Info("alphabet list has been changed, starting update",
+ gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)
@@ -61,88 +62,24 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
}
// 1. Vote to sidechain committee via alphabet contracts.
- err = gp.voter.VoteForSidechainValidator(votePrm)
+ err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
if err != nil {
- gp.log.Error("can't vote for side chain committee",
- zap.String("error", err.Error()))
+ gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
+ zap.Error(err))
}
// 2. Update NeoFSAlphabet role in the sidechain.
- innerRing, err := gp.irFetcher.InnerRingKeys()
- if err != nil {
- gp.log.Error("can't fetch inner ring list from side chain",
- zap.String("error", err.Error()))
- } else {
- newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
- if err != nil {
- gp.log.Error("can't create new inner ring list with new alphabet keys",
- zap.String("error", err.Error()))
- } else {
- sort.Sort(newInnerRing)
+ gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash)
- gp.log.Info("update of the inner ring list",
- zap.String("before", prettyKeys(innerRing)),
- zap.String("after", prettyKeys(newInnerRing)),
- )
-
- if gp.notaryDisabled {
- updPrm := nmClient.UpdateIRPrm{}
-
- updPrm.SetKeys(newInnerRing)
- updPrm.SetHash(txHash)
-
- err = gp.netmapClient.UpdateInnerRing(updPrm)
- } else {
- updPrm := client.UpdateAlphabetListPrm{}
-
- updPrm.SetList(newInnerRing)
- updPrm.SetHash(txHash)
-
- err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm)
- }
-
- if err != nil {
- gp.log.Error("can't update inner ring list with new alphabet keys",
- zap.String("error", err.Error()))
- }
- }
- }
-
- if !gp.notaryDisabled {
- // 3. Update notary role in the sidechain.
-
- updPrm := client.UpdateNotaryListPrm{}
-
- updPrm.SetList(newAlphabet)
- updPrm.SetHash(txHash)
-
- err = gp.morphClient.UpdateNotaryList(updPrm)
- if err != nil {
- gp.log.Error("can't update list of notary nodes in side chain",
- zap.String("error", err.Error()))
- }
- }
+ // 3. Update notary role in the sidechain.
+ gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash)
// 4. Update FrostFS contract in the mainnet.
- epoch := gp.epochState.EpochCounter()
+ gp.updateFrostFSContractInMainnet(ctx, newAlphabet)
- buf := make([]byte, 8)
- binary.LittleEndian.PutUint64(buf, epoch)
+ gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate)
- id := append([]byte(alphabetUpdateIDPrefix), buf...)
-
- prm := frostfscontract.AlphabetUpdatePrm{}
-
- prm.SetID(id)
- prm.SetPubs(newAlphabet)
-
- err = gp.frostfsClient.AlphabetUpdate(prm)
- if err != nil {
- gp.log.Error("can't update list of alphabet nodes in frostfs contract",
- zap.String("error", err.Error()))
- }
-
- gp.log.Info("finished alphabet list update")
+ return true
}
func prettyKeys(keys keys.PublicKeys) string {
@@ -156,3 +93,68 @@ func prettyKeys(keys keys.PublicKeys) string {
return strings.TrimRight(sb.String(), delimiter)
}
+
+func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+ innerRing, err := gp.irFetcher.InnerRingKeys(ctx)
+ if err != nil {
+ gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
+ zap.Error(err))
+ return
+ }
+
+ newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
+ if err != nil {
+ gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
+ zap.Error(err))
+ return
+ }
+
+ sort.Sort(newInnerRing)
+
+ gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList,
+ zap.String("before", prettyKeys(innerRing)),
+ zap.String("after", prettyKeys(newInnerRing)),
+ )
+
+ updPrm := client.UpdateAlphabetListPrm{}
+ updPrm.SetList(newInnerRing)
+ updPrm.SetHash(txHash)
+
+ if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
+ gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
+ zap.Error(err))
+ }
+}
+
+func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+ updPrm := client.UpdateNotaryListPrm{}
+
+ updPrm.SetList(newAlphabet)
+ updPrm.SetHash(txHash)
+
+ err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
+ if err != nil {
+ gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
+ zap.Error(err))
+ }
+}
+
+func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) {
+ epoch := gp.epochState.EpochCounter()
+
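+ // the update ID is the "AlphabetUpdate" prefix followed by the
+ // little-endian epoch, so each epoch gets its own update identifier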
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, epoch)
+
+ id := append([]byte(alphabetUpdateIDPrefix), buf...)
+
+ prm := frostfscontract.AlphabetUpdatePrm{}
+
+ prm.SetID(id)
+ prm.SetPubs(newAlphabet)
+
+ err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
+ if err != nil {
+ gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
+ zap.Error(err))
+ }
+}
diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go
index 9397186ee0..2d131eddab 100644
--- a/pkg/innerring/processors/governance/processor.go
+++ b/pkg/innerring/processors/governance/processor.go
@@ -1,9 +1,11 @@
package governance
import (
+ "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -24,7 +26,7 @@ const ProcessorPoolSize = 1
type (
// AlphabetState is a callback interface for innerring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
)
@@ -37,7 +39,7 @@ type VoteValidatorPrm struct {
// Voter is a callback interface for alphabet contract voting.
type Voter interface {
- VoteForSidechainValidator(VoteValidatorPrm) error
+ VoteForSidechainValidator(context.Context, VoteValidatorPrm) error
}
type (
@@ -50,44 +52,59 @@ type (
// Implementation must take into account availability of
// the notary contract.
IRFetcher interface {
- InnerRingKeys() (keys.PublicKeys, error)
+ InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
+ }
+
+ FrostFSClient interface {
+ AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error
+ }
+
+ NetmapClient interface {
+ UpdateInnerRing(p nmClient.UpdateIRPrm) error
+ }
+
+ MainnetClient interface {
+ NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error)
+ GetDesignateHash() util.Uint160
+ }
+
+ MorphClient interface {
+ Committee() (res keys.PublicKeys, err error)
+ UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error
+ UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error
}
// Processor of events related to governance in the network.
Processor struct {
log *logger.Logger
+ metrics metrics.Register
pool *ants.Pool
- frostfsClient *frostfscontract.Client
- netmapClient *nmClient.Client
+ frostfsClient FrostFSClient
alphabetState AlphabetState
epochState EpochState
voter Voter
irFetcher IRFetcher
- mainnetClient *client.Client
- morphClient *client.Client
-
- notaryDisabled bool
+ mainnetClient MainnetClient
+ morphClient MorphClient
designate util.Uint160
}
// Params of the processor constructor.
Params struct {
- Log *logger.Logger
+ Log *logger.Logger
+ Metrics metrics.Register
AlphabetState AlphabetState
EpochState EpochState
Voter Voter
IRFetcher IRFetcher
- MorphClient *client.Client
- MainnetClient *client.Client
- FrostFSClient *frostfscontract.Client
- NetmapClient *nmClient.Client
-
- NotaryDisabled bool
+ MorphClient MorphClient
+ MainnetClient MainnetClient
+ FrostFSClient FrostFSClient
}
)
@@ -115,41 +132,39 @@ func New(p *Params) (*Processor, error) {
return nil, fmt.Errorf("ir/governance: can't create worker pool: %w", err)
}
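+ // metrics are optional; fall back to the default register when unset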
+ metricsRegister := p.Metrics
+ if metricsRegister == nil {
+ metricsRegister = metrics.DefaultRegister{}
+ }
+
// result is cached by neo-go, so we can pre-calc it
designate := p.MainnetClient.GetDesignateHash()
return &Processor{
- log: p.Log,
- pool: pool,
- frostfsClient: p.FrostFSClient,
- netmapClient: p.NetmapClient,
- alphabetState: p.AlphabetState,
- epochState: p.EpochState,
- voter: p.Voter,
- irFetcher: p.IRFetcher,
- mainnetClient: p.MainnetClient,
- morphClient: p.MorphClient,
- notaryDisabled: p.NotaryDisabled,
- designate: designate,
+ log: p.Log,
+ metrics: metricsRegister,
+ pool: pool,
+ frostfsClient: p.FrostFSClient,
+ alphabetState: p.AlphabetState,
+ epochState: p.EpochState,
+ voter: p.Voter,
+ irFetcher: p.IRFetcher,
+ mainnetClient: p.MainnetClient,
+ morphClient: p.MorphClient,
+ designate: designate,
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- var pi event.NotificationParserInfo
- pi.SetScriptHash(gp.designate)
- pi.SetType(event.TypeFromString(native.DesignationEventName))
- pi.SetParser(rolemanagement.ParseDesignate)
- return []event.NotificationParserInfo{pi}
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- var hi event.NotificationHandlerInfo
- hi.SetScriptHash(gp.designate)
- hi.SetType(event.TypeFromString(native.DesignationEventName))
- hi.SetHandler(gp.HandleAlphabetSync)
- return []event.NotificationHandlerInfo{hi}
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: gp.designate,
+ Type: event.TypeFromString(native.DesignationEventName),
+ Parser: rolemanagement.ParseDesignate,
+ Handlers: []event.Handler{gp.HandleAlphabetSync},
+ },
+ }
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
@@ -161,8 +176,3 @@ func (gp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
func (gp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (gp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go
index e4024e95f7..abd5b089a5 100644
--- a/pkg/innerring/processors/netmap/cleanup_table.go
+++ b/pkg/innerring/processors/netmap/cleanup_table.go
@@ -9,7 +9,7 @@ import (
type (
cleanupTable struct {
- *sync.RWMutex
+ sync.RWMutex
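+ // embedded by value: the zero RWMutex is ready to use, so the
+ // constructor no longer has to allocate it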
enabled bool
threshold uint64
lastAccess map[string]epochStampWithNodeInfo
@@ -24,12 +24,13 @@ type (
epochStamp
binNodeInfo []byte
+
+ maintenance bool
}
)
func newCleanupTable(enabled bool, threshold uint64) cleanupTable {
return cleanupTable{
- RWMutex: new(sync.RWMutex),
enabled: enabled,
threshold: threshold,
lastAccess: make(map[string]epochStampWithNodeInfo),
@@ -59,6 +60,7 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) {
}
access.binNodeInfo = binNodeInfo
+ access.maintenance = nmNodes[i].Status().IsMaintenance()
newMap[keyString] = access
}
@@ -80,10 +82,7 @@ func (c *cleanupTable) touch(keyString string, now uint64, binNodeInfo []byte) b
result := !ok || access.removeFlag || !bytes.Equal(access.binNodeInfo, binNodeInfo)
access.removeFlag = false // reset remove flag on each touch
- if now > access.epoch {
- access.epoch = now
- }
-
+ access.epoch = max(access.epoch, now)
access.binNodeInfo = binNodeInfo // update binary node info
c.lastAccess[keyString] = access
@@ -106,7 +105,7 @@ func (c *cleanupTable) forEachRemoveCandidate(epoch uint64, f func(string) error
defer c.Unlock()
for keyString, access := range c.lastAccess {
- if epoch-access.epoch > c.threshold {
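+ // nodes in maintenance mode are expected to be inactive, so they
+ // never become stale remove candidates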
+ if !access.maintenance && epoch-access.epoch > c.threshold {
access.removeFlag = true // set remove flag
c.lastAccess[keyString] = access
diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go
index 9597103931..208bd54960 100644
--- a/pkg/innerring/processors/netmap/cleanup_table_test.go
+++ b/pkg/innerring/processors/netmap/cleanup_table_test.go
@@ -124,6 +124,21 @@ func TestCleanupTable(t *testing.T) {
}))
require.EqualValues(t, len(infos)-1, cnt)
})
+
+ t.Run("skip maintenance nodes", func(t *testing.T) {
+ cnt := 0
+ infos[1].SetStatus(netmap.Maintenance)
+ key := netmap.StringifyPublicKey(infos[1])
+ c.update(networkMap, 5)
+
+ require.NoError(t,
+ c.forEachRemoveCandidate(5, func(s string) error {
+ cnt++
+ require.NotEqual(t, s, key)
+ return nil
+ }))
+ require.EqualValues(t, len(infos)-1, cnt)
+ })
})
}
diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go
index 54e4ea3abc..4c7199a49f 100644
--- a/pkg/innerring/processors/netmap/handlers.go
+++ b/pkg/innerring/processors/netmap/handlers.go
@@ -1,121 +1,104 @@
package netmap
import (
+ "context"
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
- subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
"go.uber.org/zap"
)
-func (np *Processor) HandleNewEpochTick(ev event.Event) {
+func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
- np.log.Info("tick", zap.String("type", "epoch"))
+ np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
// send an event to the worker pool
- err := np.pool.Submit(func() { np.processNewEpochTick() })
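+ // SubmitEvent wraps pool.Submit so the handler's boolean result can be
+ // recorded in np.metrics under the given event name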
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
if err != nil {
 // here the system can be moved into a controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleNewEpoch(ev event.Event) {
+func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
- np.log.Info("notification",
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
// send an event to the worker pool
- err := np.pool.Submit(func() {
- np.processNewEpoch(epochEvent)
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
+ return np.processNewEpoch(ctx, epochEvent)
})
if err != nil {
 // here the system can be moved into a controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleAddPeer(ev event.Event) {
+func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
- np.log.Info("notification",
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "add peer"),
)
// send an event to the worker pool
- err := np.pool.Submit(func() {
- np.processAddPeer(newPeer)
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
+ return np.processAddPeer(ctx, newPeer)
})
if err != nil {
 // here the system can be moved into a controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleUpdateState(ev event.Event) {
+func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
- np.log.Info("notification",
+ np.log.Info(ctx, logs.Notification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
// send event to the worker pool
- err := np.pool.Submit(func() {
- np.processUpdatePeer(updPeer)
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
+ return np.processUpdatePeer(ctx, updPeer)
})
if err != nil {
 // here the system can be moved into a controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCleanupTick(ev event.Event) {
+func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
if !np.netmapSnapshot.enabled {
- np.log.Debug("netmap clean up routine is disabled")
+ np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)
return
}
cleanup := ev.(netmapCleanupTick)
- np.log.Info("tick", zap.String("type", "netmap cleaner"))
+ np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))
// send event to the worker pool
- err := np.pool.Submit(func() {
- np.processNetmapCleanupTick(cleanup)
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
+ return np.processNetmapCleanupTick(ctx, cleanup)
})
if err != nil {
 // here the system can be moved into a controlled degradation stage
- np.log.Warn("netmap worker pool drained",
- zap.Int("capacity", np.pool.Cap()))
- }
-}
-
-func (np *Processor) handleRemoveNode(ev event.Event) {
- removeNode := ev.(subnetevents.RemoveNode)
-
- np.log.Info("notification",
- zap.String("type", "remove node from subnet"),
- zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())),
- zap.String("key", hex.EncodeToString(removeNode.Node())),
- )
-
- err := np.pool.Submit(func() {
- np.processRemoveSubnetNode(removeNode)
- })
- if err != nil {
- // there system can be moved into controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go
new file mode 100644
index 0000000000..934c3790d7
--- /dev/null
+++ b/pkg/innerring/processors/netmap/handlers_test.go
@@ -0,0 +1,419 @@
+package netmap
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
+ timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/network/payload"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewEpochTick(t *testing.T) {
+ t.Parallel()
+ es := &testEpochState{
+ counter: 100,
+ }
+ nc := &testNetmapClient{}
+
+ proc, err := newTestProc(t, func(p *Params) {
+ p.CleanupEnabled = true
+ p.EpochState = es
+ p.NetmapClient = nc
+ })
+
+ require.NoError(t, err, "failed to create processor")
+
+ ev := timerEvent.NewEpochTick{}
+ proc.HandleNewEpochTick(context.Background(), ev)
+
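+ // handlers only enqueue work; poll the pool until the async task
+ // drains before asserting its side effects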
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.EqualValues(t, []uint64{101}, nc.newEpochs, "invalid epochs")
+}
+
+func TestNewEpoch(t *testing.T) {
+ t.Parallel()
+ var node1 netmap.NodeInfo
+ key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35")
+ require.NoError(t, err, "failed to parse key1")
+ node1.SetPublicKey(key1.Bytes())
+
+ var node2 netmap.NodeInfo
+ key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3")
+ require.NoError(t, err, "failed to parse key2")
+ node2.SetPublicKey(key2.Bytes())
+
+ network := &netmap.NetMap{}
+ network.SetNodes([]netmap.NodeInfo{node1, node2})
+
+ es := &testEpochState{
+ counter: 100,
+ duration: 10,
+ }
+ r := &testEpochResetter{}
+ nc := &testNetmapClient{
+ epochDuration: 20,
+ txHeights: map[util.Uint256]uint32{
+ {101}: 10_000,
+ },
+ netmap: network,
+ }
+ eh := &testEventHandler{}
+
+ proc, err := newTestProc(t, func(p *Params) {
+ p.NotaryDepositHandler = eh.Handle
+ p.AlphabetSyncHandler = eh.Handle
+ p.NetmapClient = nc
+ p.EpochTimer = r
+ p.EpochState = es
+ })
+
+ require.NoError(t, err, "failed to create processor")
+
+ ev := netmapEvent.NewEpoch{
+ Num: 101,
+ Hash: util.Uint256{101},
+ }
+ proc.handleNewEpoch(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.Equal(t, nc.epochDuration, es.duration, "invalid epoch duration")
+ require.Equal(t, ev.Num, es.counter, "invalid epoch counter")
+ require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets")
+
+ require.EqualValues(t, []event.Event{
+ governance.NewSyncEvent(ev.TxHash()),
+ ev,
+ }, eh.handledEvents, "invalid handled events")
+}
+
+func TestAddPeer(t *testing.T) {
+ t.Parallel()
+
+ nc := &testNetmapClient{
+ contractAddress: util.Uint160{47},
+ }
+
+ proc, err := newTestProc(t, func(p *Params) {
+ p.NetmapClient = nc
+ })
+
+ require.NoError(t, err, "failed to create processor")
+
+ var node netmap.NodeInfo
+ key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35")
+ require.NoError(t, err, "failed to parse key")
+ node.SetPublicKey(key.Bytes())
+
+ ev := netmapEvent.AddPeer{
+ NodeBytes: node.Marshal(),
+ Request: &payload.P2PNotaryRequest{
+ MainTransaction: &transaction.Transaction{},
+ },
+ }
+ proc.handleAddPeer(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.Nil(t, nc.notaryInvokes, "invalid notary invokes")
+
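+ // processAddPeer approves only candidates that are ONLINE, so repeat
+ // the notification with the status set and expect a notary invoke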
+ node.SetStatus(netmap.Online)
+ ev = netmapEvent.AddPeer{
+ NodeBytes: node.Marshal(),
+ Request: &payload.P2PNotaryRequest{
+ MainTransaction: &transaction.Transaction{},
+ },
+ }
+ proc.handleAddPeer(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.EqualValues(t, []notaryInvoke{
+ {
+ contract: nc.contractAddress,
+ fee: 0,
+ nonce: ev.NotaryRequest().MainTransaction.Nonce,
+ vub: nil,
+ method: "addPeerIR",
+ args: []any{node.Marshal()},
+ },
+ }, nc.notaryInvokes, "invalid notary invokes")
+}
+
+func TestUpdateState(t *testing.T) {
+ t.Parallel()
+
+ ns := &testNodeStateSettings{
+ maintAllowed: true,
+ }
+ nc := &testNetmapClient{}
+
+ proc, err := newTestProc(t, func(p *Params) {
+ p.NetmapClient = nc
+ p.NodeStateSettings = ns
+ })
+
+ require.NoError(t, err, "failed to create processor")
+
+ key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35")
+ require.NoError(t, err, "failed to parse key")
+
+ ev := netmapEvent.UpdatePeer{
+ State: netmapContract.NodeStateOnline,
+ PubKey: key,
+ Request: &payload.P2PNotaryRequest{
+ MainTransaction: &transaction.Transaction{},
+ },
+ }
+ proc.handleUpdateState(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.EqualValues(t, []*transaction.Transaction{ev.Request.MainTransaction}, nc.invokedTxs, "invalid transactions")
+}
+
+func TestCleanupTick(t *testing.T) {
+ t.Parallel()
+
+ nc := &testNetmapClient{
+ contractAddress: util.Uint160{111},
+ }
+ proc, err := newTestProc(t,
+ func(p *Params) {
+ p.NetmapClient = nc
+ p.CleanupEnabled = true
+ },
+ )
+
+ require.NoError(t, err, "failed to create processor")
+
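+ // with the test-default CleanupThreshold of 3 and tick epoch 100,
+ // key1 (epoch 95) is stale while key2 (epoch 98) is still fresh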
+ key1Str := "038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35"
+ proc.netmapSnapshot.lastAccess[key1Str] = epochStampWithNodeInfo{
+ epochStamp: epochStamp{
+ epoch: 95,
+ removeFlag: false,
+ },
+ }
+ key2Str := "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3"
+ proc.netmapSnapshot.lastAccess[key2Str] = epochStampWithNodeInfo{
+ epochStamp: epochStamp{
+ epoch: 98,
+ removeFlag: false,
+ },
+ }
+
+ ev := netmapCleanupTick{
+ epoch: 100,
+ txHash: util.Uint256{123},
+ }
+
+ proc.handleCleanupTick(context.Background(), ev)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ keyExp, err := keys.NewPublicKeyFromString(key1Str)
+ require.NoError(t, err, "failed to parse expired key")
+
+ updExp := netmapclient.UpdatePeerPrm{}
+ updExp.SetKey(keyExp.Bytes())
+ updExp.SetHash(ev.TxHash())
+
+ require.EqualValues(t, []notaryInvoke{
+ {
+ contract: nc.contractAddress,
+ fee: 0,
+ nonce: uint32(ev.epoch),
+ vub: nil,
+ method: "updateStateIR",
+ args: []any{int64(v2netmap.Offline), keyExp.Bytes()},
+ },
+ }, nc.notaryInvokes, "invalid notary invokes")
+ require.True(t, proc.netmapSnapshot.lastAccess[key1Str].removeFlag, "invalid expired removed flag")
+ require.False(t, proc.netmapSnapshot.lastAccess[key2Str].removeFlag, "invalid non expired removed flag")
+}
+
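+// newTestProc builds a Processor over no-op test doubles; nonDefault lets a
+// test override individual Params before construction.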
+func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
+ ns := &testNodeStateSettings{}
+ es := &testEpochState{}
+ r := &testEpochResetter{}
+ as := &testAlphabetState{
+ isAlphabet: true,
+ }
+ nc := &testNetmapClient{}
+ eh := &testEventHandler{}
+
+ p := &Params{
+ Log: test.NewLogger(t),
+ PoolSize: 1,
+ CleanupEnabled: false,
+ CleanupThreshold: 3,
+ NodeStateSettings: ns,
+ NodeValidator: &testValidator{},
+ EpochState: es,
+ EpochTimer: r,
+ AlphabetState: as,
+ NetmapClient: nc,
+ NotaryDepositHandler: eh.Handle,
+ AlphabetSyncHandler: eh.Handle,
+ }
+
+ nonDefault(p)
+
+ return New(p)
+}
+
+type testNodeStateSettings struct {
+ maintAllowed bool
+}
+
+func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {
+ if s.maintAllowed {
+ return nil
+ }
+ return fmt.Errorf("maintenance mode not allowed")
+}
+
+type testValidator struct{}
+
+func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error {
+ return nil
+}
+
+type testEpochState struct {
+ counter uint64
+ duration uint64
+}
+
+func (s *testEpochState) SetEpochCounter(c uint64) {
+ s.counter = c
+}
+
+func (s *testEpochState) EpochCounter() uint64 {
+ return s.counter
+}
+
+func (s *testEpochState) SetEpochDuration(d uint64) {
+ s.duration = d
+}
+
+func (s *testEpochState) EpochDuration() uint64 {
+ return s.duration
+}
+
+type testEpochResetter struct {
+ timers []uint32
+}
+
+func (r *testEpochResetter) ResetEpochTimer(t uint32) error {
+ r.timers = append(r.timers, t)
+ return nil
+}
+
+type testAlphabetState struct {
+ isAlphabet bool
+}
+
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+ return s.isAlphabet
+}
+
+type notaryInvoke struct {
+ contract util.Uint160
+ fee fixedn.Fixed8
+ nonce uint32
+ vub *uint32
+ method string
+ args []any
+}
+
+type testNetmapClient struct {
+ contractAddress util.Uint160
+ epochDuration uint64
+ netmap *netmap.NetMap
+ txHeights map[util.Uint256]uint32
+
+ notaryInvokes []notaryInvoke
+ newEpochs []uint64
+ invokedTxs []*transaction.Transaction
+}
+
+func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+ c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{
+ contract: contract,
+ fee: fee,
+ nonce: nonce,
+ vub: vub,
+ method: method,
+ args: args,
+ })
+ return nil
+}
+
+func (c *testNetmapClient) ContractAddress() util.Uint160 {
+ return c.contractAddress
+}
+
+func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) {
+ return c.epochDuration, nil
+}
+
+func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) {
+ if res, found := c.txHeights[h]; found {
+ return res, nil
+ }
+ return 0, fmt.Errorf("not found")
+}
+
+func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+ return c.netmap, nil
+}
+
+func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error {
+ c.newEpochs = append(c.newEpochs, epoch)
+ return nil
+}
+
+func (c *testNetmapClient) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
+ return true, nil
+}
+
+func (c *testNetmapClient) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
+ c.invokedTxs = append(c.invokedTxs, mainTx)
+ return nil
+}
+
+type testEventHandler struct {
+ handledEvents []event.Event
+}
+
+func (h *testEventHandler) Handle(_ context.Context, e event.Event) {
+ h.handledEvents = append(h.handledEvents, e)
+}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
index d071a7792a..b81dc9989a 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
@@ -1,10 +1,11 @@
package locode
import (
+ "context"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -29,7 +30,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record
// - Continent: R.Continent().String().
//
// UN-LOCODE attribute remains untouched.
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
attrLocode := n.LOCODE()
if attrLocode == "" {
return nil
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
index 6697391e88..fa2dd1ac1d 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
@@ -1,13 +1,14 @@
package locode_test
import (
+ "context"
"errors"
"fmt"
"testing"
+ locodestd "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode"
- locodestd "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)
@@ -92,7 +93,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
t.Run("w/o locode", func(t *testing.T) {
n := nodeInfoWithSomeAttrs()
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.NoError(t, err)
})
@@ -102,7 +103,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttrValue(n, "WRONG LOCODE")
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.Error(t, err)
})
@@ -111,7 +112,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"})
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.Error(t, err)
})
@@ -119,7 +120,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, r.LOCODE)
- err := validator.VerifyAndUpdate(n)
+ err := validator.VerifyAndUpdate(context.Background(), n)
require.NoError(t, err)
require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode"))
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
index e6332261ea..ba5db9205f 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
@@ -1,45 +1,45 @@
package locode
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+ "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
)
// Record is an interface of read-only
// FrostFS LOCODE database single entry.
type Record interface {
- // Must return ISO 3166-1 alpha-2
+ // CountryCode must return ISO 3166-1 alpha-2
// country code.
//
// Must not return nil.
CountryCode() *locodedb.CountryCode
- // Must return English short country name
+ // CountryName must return English short country name
// officially used by the ISO 3166
// Maintenance Agency (ISO 3166/MA).
CountryName() string
- // Must return UN/LOCODE 3-character code
+ // LocationCode must return UN/LOCODE 3-character code
// for the location (numerals 2-9 may also
// be used).
//
// Must not return nil.
LocationCode() *locodedb.LocationCode
- // Must return name of the location which
+ // LocationName must return the name of the location that
 // has been allocated a UN/LOCODE, without
 // diacritic signs.
LocationName() string
- // Must return ISO 1-3 character alphabetic
+ // SubDivCode must return the ISO 1-3 character alphabetic
// and/or numeric code for the administrative
// division of the country concerned.
SubDivCode() string
- // Must return subdivision name.
+ // SubDivName must return subdivision name.
SubDivName() string
- // Must return existing continent where is
+ // Continent must return the continent where
 // the location is.
//
// Must not return nil.
@@ -49,7 +49,7 @@ type Record interface {
// DB is an interface of read-only
// FrostFS LOCODE database.
type DB interface {
- // Must find the record that corresponds to
+ // Get must find the record that corresponds to
// LOCODE and provides the Record interface.
//
// Must return an error if Record is nil.
diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
index 126f365827..0e4628ac76 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
@@ -1,6 +1,7 @@
package maddress
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -8,7 +9,7 @@ import (
)
// VerifyAndUpdate calls network.VerifyAddress.
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
err := network.VerifyMultiAddress(*n)
if err != nil {
return fmt.Errorf("could not verify multiaddress: %w", err)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
index 4094e50a59..03c41a4517 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
@@ -7,6 +7,7 @@ map candidates.
package state
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -23,7 +24,7 @@ type NetworkSettings interface {
// no error if allowed;
// ErrMaintenanceModeDisallowed if disallowed;
// other error if there are any problems with the check.
- MaintenanceModeAllowed() error
+ MaintenanceModeAllowed(ctx context.Context) error
}
// NetMapCandidateValidator represents tool which checks state of nodes which
@@ -55,13 +56,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting
// MUST NOT be called before SetNetworkSettings.
//
// See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
-func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error {
- if node.IsOnline() {
+func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error {
+ if node.Status().IsOnline() {
return nil
}
- if node.IsMaintenance() {
- return x.netSettings.MaintenanceModeAllowed()
+ if node.Status().IsMaintenance() {
+ return x.netSettings.MaintenanceModeAllowed(ctx)
}
return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE")
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
index a557628f08..cbf48a7101 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
@@ -1,6 +1,7 @@
package state_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -13,7 +14,7 @@ type testNetworkSettings struct {
disallowed bool
}
-func (x testNetworkSettings) MaintenanceModeAllowed() error {
+func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error {
if x.disallowed {
return state.ErrMaintenanceModeDisallowed
}
@@ -41,22 +42,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
},
{
name: "ONLINE",
- preparer: (*netmap.NodeInfo).SetOnline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) },
valid: true,
},
{
name: "OFFLINE",
- preparer: (*netmap.NodeInfo).SetOffline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) },
valid: false,
},
{
name: "MAINTENANCE/allowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: true,
},
{
name: "MAINTENANCE/disallowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: false,
validatorPreparer: func(v *state.NetMapCandidateValidator) {
var s testNetworkSettings
@@ -81,7 +82,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
testCase.validatorPreparer(&v)
}
- err := v.VerifyAndUpdate(&node)
+ err := v.VerifyAndUpdate(context.Background(), &node)
if testCase.valid {
require.NoError(t, err, testCase.name)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/subnet/calls.go b/pkg/innerring/processors/netmap/nodevalidation/subnet/calls.go
deleted file mode 100644
index 4c859703db..0000000000
--- a/pkg/innerring/processors/netmap/nodevalidation/subnet/calls.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package subnet
-
-import (
- "fmt"
-
- morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
-)
-
-// VerifyAndUpdate calls subnet contract's `NodeAllowed` method.
-// Removes subnets that have not been approved by the contract.
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
- prm := morphsubnet.NodeAllowedPrm{}
-
- err := n.IterateSubnets(func(id subnetid.ID) error {
- // every node can be bootstrapped
- // to the zero subnetwork
- if subnetid.IsZero(id) {
- return nil
- }
-
- prm.SetID(id.Marshal())
- prm.SetNode(n.PublicKey())
-
- res, err := v.subnetClient.NodeAllowed(prm)
- if err != nil {
- return fmt.Errorf("could not call `NodeAllowed` contract method: %w", err)
- }
-
- if !res.Allowed() {
- return netmap.ErrRemoveSubnet
- }
-
- return nil
- })
- if err != nil {
- return fmt.Errorf("could not verify subnet entrance of the node: %w", err)
- }
-
- return nil
-}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/subnet/validator.go b/pkg/innerring/processors/netmap/nodevalidation/subnet/validator.go
deleted file mode 100644
index f9ae4e614e..0000000000
--- a/pkg/innerring/processors/netmap/nodevalidation/subnet/validator.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package subnet
-
-import (
- "errors"
-
- morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
-)
-
-// Validator is an utility that verifies node subnet
-// allowance.
-//
-// For correct operation, Validator must be created
-// using the constructor (New). After successful creation,
-// the Validator is immediately ready to work through API.
-type Validator struct {
- subnetClient *morphsubnet.Client
-}
-
-// Prm groups the required parameters of the Validator's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- SubnetClient *morphsubnet.Client
-}
-
-// New creates a new instance of the Validator.
-//
-// The created Validator does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) (*Validator, error) {
- switch {
- case prm.SubnetClient == nil:
- return nil, errors.New("ir/nodeValidator: subnet client is not set")
- }
-
- return &Validator{
- subnetClient: prm.SubnetClient,
- }, nil
-}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go
index e9b24e0247..3dbe98a8dd 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go
@@ -1,6 +1,8 @@
package nodevalidation
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -26,9 +28,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator {
// VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators.
//
// If error appears, returns it immediately.
-func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error {
+func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error {
for _, v := range c.validators {
- if err := v.VerifyAndUpdate(ni); err != nil {
+ if err := v.VerifyAndUpdate(ctx, ni); err != nil {
return err
}
}
diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go
index e4425ef17d..8f8cc17ff3 100644
--- a/pkg/innerring/processors/netmap/process_cleanup.go
+++ b/pkg/innerring/processors/netmap/process_cleanup.go
@@ -1,60 +1,57 @@
package netmap
import (
- v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
- netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
-func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore new netmap cleanup tick")
+func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
- return
+ return true
}
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
- np.log.Warn("can't decode public key of netmap node",
+ np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))
return nil
}
- np.log.Info("vote to remove node from netmap", zap.String("key", s))
+ np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
// In notary environments we call UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can do this in the same form.
// See https://github.com/nspcc-dev/frostfs-contract/issues/225
const methodUpdateStateNotary = "updateStateIR"
- if np.notaryDisabled {
- prm := netmapclient.UpdatePeerPrm{}
-
- prm.SetKey(key.Bytes())
- prm.SetHash(ev.TxHash())
-
- err = np.netmapClient.UpdatePeerState(prm)
- } else {
- err = np.netmapClient.Morph().NotaryInvoke(
- np.netmapClient.ContractAddress(),
- 0,
- uint32(ev.epoch),
- nil,
- methodUpdateStateNotary,
- int64(v2netmap.Offline), key.Bytes(),
- )
- }
+ err = np.netmapClient.MorphNotaryInvoke(
+ ctx,
+ np.netmapClient.ContractAddress(),
+ 0,
+ uint32(ev.epoch),
+ nil,
+ methodUpdateStateNotary,
+ int64(v2netmap.Offline), key.Bytes(),
+ )
if err != nil {
- np.log.Error("can't invoke netmap.UpdateState", zap.Error(err))
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}
return nil
})
if err != nil {
- np.log.Warn("can't iterate on netmap cleaner cache",
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
+ zap.Error(err))
+ return false
}
+
+ return true
}
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index 48de528e1d..7c78d24a5f 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -1,85 +1,73 @@
package netmap
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"go.uber.org/zap"
)
// Process new epoch notification by setting global epoch value and resetting
// local epoch timer.
-func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
+func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
epoch := ev.EpochNumber()
- epochDuration, err := np.netmapClient.EpochDuration()
+ epochDuration, err := np.netmapClient.EpochDuration(ctx)
if err != nil {
- np.log.Warn("can't get epoch duration",
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
+ zap.Error(err))
} else {
np.epochState.SetEpochDuration(epochDuration)
}
np.epochState.SetEpochCounter(epoch)
- h, err := np.netmapClient.Morph().TxHeight(ev.TxHash())
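+ // anchor the epoch timer reset to the chain height of the NewEpoch transaction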
+ h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
if err != nil {
- np.log.Warn("can't get transaction height",
+ np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
- np.log.Warn("can't reset epoch timer",
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
+ zap.Error(err))
}
// get new netmap snapshot
- networkMap, err := np.netmapClient.NetMap()
+ networkMap, err := np.netmapClient.NetMap(ctx)
if err != nil {
- np.log.Warn("can't get netmap snapshot to perform cleanup",
- zap.String("error", err.Error()))
+ np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
+ zap.Error(err))
- return
- }
-
- prm := cntClient.StartEstimationPrm{}
-
- prm.SetEpoch(epoch - 1)
- prm.SetHash(ev.TxHash())
-
- if epoch > 0 { // estimates are invalid in genesis epoch
- err = np.containerWrp.StartEstimation(prm)
-
- if err != nil {
- np.log.Warn("can't start container size estimation",
- zap.Uint64("epoch", epoch),
- zap.String("error", err.Error()))
- }
+ return false
}
np.netmapSnapshot.update(*networkMap, epoch)
- np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
- np.handleNewAudit(audit.NewAuditStartEvent(epoch))
- np.handleAuditSettlements(settlement.NewAuditEvent(epoch))
- np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash()))
- np.handleNotaryDeposit(ev)
+ np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
+ np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash()))
+ np.handleNotaryDeposit(ctx, ev)
+
+ return true
}
// Process new epoch tick by invoking new epoch method in network map contract.
-func (np *Processor) processNewEpochTick() {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore new epoch tick")
- return
+func (np *Processor) processNewEpochTick(ctx context.Context) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
+ return true
}
nextEpoch := np.epochState.EpochCounter() + 1
- np.log.Debug("next epoch", zap.Uint64("value", nextEpoch))
+ np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
- err := np.netmapClient.NewEpoch(nextEpoch)
+ err := np.netmapClient.NewEpoch(ctx, nextEpoch)
if err != nil {
- np.log.Error("can't invoke netmap.NewEpoch", zap.Error(err))
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
+ return false
}
+
+ return true
}
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index 3734bae014..b5c727cc7c 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -1,54 +1,51 @@
package netmap
import (
- "bytes"
+ "context"
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
- subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
"go.uber.org/zap"
)
// Process add peer notification by sanity check of new node
// local epoch timer.
-func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore new peer notification")
- return
+func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
+ return true
}
// check if notary transaction is valid, see #976
- if originalRequest := ev.NotaryRequest(); originalRequest != nil {
- tx := originalRequest.MainTransaction
- ok, err := np.netmapClient.Morph().IsValidScript(tx.Script, tx.Signers)
- if err != nil || !ok {
- np.log.Warn("non-halt notary transaction",
- zap.String("method", "netmap.AddPeer"),
- zap.String("hash", tx.Hash().StringLE()),
- zap.Error(err))
- return
- }
+ tx := ev.NotaryRequest().MainTransaction
+ ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
+ if err != nil || !ok {
+ np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
+ zap.String("method", "netmap.AddPeer"),
+ zap.String("hash", tx.Hash().StringLE()),
+ zap.Error(err))
+ return false
}
// unmarshal node info
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
 // it would be nice to have the tx id in the event structure to log it
- np.log.Warn("can't parse network map candidate")
- return
+ np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
+ return false
}
// validate and update node info
- err := np.nodeValidator.VerifyAndUpdate(&nodeInfo)
+ err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo)
if err != nil {
- np.log.Warn("could not verify and update information about network map candidate",
- zap.String("error", err.Error()),
+ np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
+ zap.Error(err),
)
- return
+ return false
}
// sort attributes to make it consistent
@@ -61,8 +58,13 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary)
- if updated {
- np.log.Info("approving network map candidate",
+ // `processAddPeer` reacts to the `AddPeer` notification, `processNewEpoch` to `NewEpoch`.
+ // These two notifications are produced in order: `NewEpoch` -> `AddPeer`.
+ // However, there is no guarantee that the code is executed in the same order.
+ // That is why `addPeerIR` is performed only when the node is online,
+ // because within this method the contract sets the node's state to `ONLINE`.
+ if updated && nodeInfo.Status().IsOnline() {
+ np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@@ -73,32 +75,30 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
// See https://github.com/nspcc-dev/frostfs-contract/issues/154.
const methodAddPeerNotary = "addPeerIR"
- if nr := ev.NotaryRequest(); nr != nil {
- // create new notary request with the original nonce
- err = np.netmapClient.Morph().NotaryInvoke(
- np.netmapClient.ContractAddress(),
- 0,
- nr.MainTransaction.Nonce,
- nil,
- methodAddPeerNotary,
- nodeInfoBinary,
- )
- } else {
- // notification event case
- err = np.netmapClient.AddPeer(prm)
- }
-
+ // create new notary request with the original nonce
+ err = np.netmapClient.MorphNotaryInvoke(
+ ctx,
+ np.netmapClient.ContractAddress(),
+ 0,
+ ev.NotaryRequest().MainTransaction.Nonce,
+ nil,
+ methodAddPeerNotary,
+ nodeInfoBinary,
+ )
if err != nil {
- np.log.Error("can't invoke netmap.AddPeer", zap.Error(err))
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
+ return false
}
}
+
+ return true
}
// Process update peer notification by sending approval tx to the smart contract.
-func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore update peer notification")
- return
+func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool {
+ if !np.alphabetState.IsAlphabet(ctx) {
+ np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
+ return true
}
// flag node to remove from local view, so it can be re-bootstrapped
@@ -108,104 +108,20 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
var err error
if ev.Maintenance() {
- err = np.nodeStateSettings.MaintenanceModeAllowed()
+ err = np.nodeStateSettings.MaintenanceModeAllowed(ctx)
if err != nil {
- np.log.Info("prevent switching node to maintenance state",
+ np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)
- return
+ return false
}
}
- if nr := ev.NotaryRequest(); nr != nil {
- err = np.netmapClient.Morph().NotarySignAndInvokeTX(nr.MainTransaction)
- } else {
- prm := netmapclient.UpdatePeerPrm{}
-
- switch {
- case ev.Online():
- prm.SetOnline()
- case ev.Maintenance():
- prm.SetMaintenance()
- }
-
- prm.SetKey(ev.PublicKey().Bytes())
-
- err = np.netmapClient.UpdatePeerState(prm)
- }
- if err != nil {
- np.log.Error("can't invoke netmap.UpdatePeer", zap.Error(err))
- }
-}
-
-func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
- if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore remove node from subnet notification")
- return
- }
-
- candidates, err := np.netmapClient.GetCandidates()
- if err != nil {
- np.log.Warn("could not get network map candidates",
- zap.Error(err),
- )
- return
- }
-
- rawSubnet := ev.SubnetworkID()
- var subnetToRemoveFrom subnetid.ID
-
- err = subnetToRemoveFrom.Unmarshal(rawSubnet)
- if err != nil {
- np.log.Warn("could not unmarshal subnet id",
- zap.Error(err),
- )
- return
- }
-
- if subnetid.IsZero(subnetToRemoveFrom) {
- np.log.Warn("got zero subnet in remove node notification")
- return
- }
-
- for i := range candidates {
- if !bytes.Equal(candidates[i].PublicKey(), ev.Node()) {
- continue
- }
-
- err = candidates[i].IterateSubnets(func(subNetID subnetid.ID) error {
- if subNetID.Equals(subnetToRemoveFrom) {
- return netmap.ErrRemoveSubnet
- }
-
- return nil
- })
- if err != nil {
- np.log.Warn("could not iterate over subnetworks of the node", zap.Error(err))
- np.log.Info("vote to remove node from netmap", zap.String("key", hex.EncodeToString(ev.Node())))
-
- prm := netmapclient.UpdatePeerPrm{}
- prm.SetKey(ev.Node())
- prm.SetHash(ev.TxHash())
-
- err = np.netmapClient.UpdatePeerState(prm)
- if err != nil {
- np.log.Error("could not invoke netmap.UpdateState", zap.Error(err))
- return
- }
- } else {
- prm := netmapclient.AddPeerPrm{}
- prm.SetNodeInfo(candidates[i])
- prm.SetHash(ev.TxHash())
-
- err = np.netmapClient.AddPeer(prm)
- if err != nil {
- np.log.Error("could not invoke netmap.AddPeer", zap.Error(err))
- return
- }
- }
-
- break
+ if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
+ np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
+ return false
}
+
+ return true
}
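
A side note on the new handler signatures: `processAddPeer` and `processUpdatePeer` now return `bool` instead of bare `return`s, presumably so the event loop can feed each outcome into the new `metrics.Register` dependency. A minimal, self-contained sketch of that pattern; the `Register` interface, `AddEvent` method, and wiring below are assumptions for illustration, not the frostfs-node API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Register is a hypothetical metrics sink mirroring the processor's
// new metrics dependency: it records how long a handler ran and
// whether it reported success.
type Register interface {
	AddEvent(d time.Duration, typ string, success bool)
}

// instrument wraps a bool-returning handler so every invocation
// reports its duration and outcome to the metrics register.
func instrument(m Register, typ string, h func(context.Context) bool) func(context.Context) {
	return func(ctx context.Context) {
		start := time.Now()
		ok := h(ctx)
		m.AddEvent(time.Since(start), typ, ok)
	}
}

type stdoutRegister struct{}

func (stdoutRegister) AddEvent(d time.Duration, typ string, success bool) {
	fmt.Printf("event=%s took=%s success=%t\n", typ, d, success)
}

func main() {
	h := instrument(stdoutRegister{}, "netmap.AddPeer", func(ctx context.Context) bool {
		return true // a processAddPeer-style handler would do real work here
	})
	h(context.Background())
}
```
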
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index de145d48cb..277bca1c33 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -1,21 +1,21 @@
package netmap
import (
+ "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
- subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
type (
@@ -34,14 +34,14 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet() bool
+ IsAlphabet(context.Context) bool
}
// NodeValidator wraps basic method of checking the correctness
// of information about the node and its finalization for adding
// to the network map.
NodeValidator interface {
- // Must verify and optionally update NodeInfo structure.
+ // VerifyAndUpdate must verify and optionally update NodeInfo structure.
//
// Must return an error if NodeInfo input is invalid.
// Must return an error if it is not possible to correctly
@@ -49,68 +49,65 @@ type (
//
// If no error occurs, the parameter must point to the
// ready-made NodeInfo structure.
- VerifyAndUpdate(*netmap.NodeInfo) error
+ VerifyAndUpdate(context.Context, *netmap.NodeInfo) error
+ }
+
+ Client interface {
+ MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
+ ContractAddress() util.Uint160
+ EpochDuration(ctx context.Context) (uint64, error)
+ MorphTxHeight(h util.Uint256) (res uint32, err error)
+ NetMap(ctx context.Context) (*netmap.NetMap, error)
+ NewEpoch(ctx context.Context, epoch uint64) error
+ MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
+ MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
}
// Processor of events produced by network map contract
// and new epoch ticker, because it is related to contract.
Processor struct {
log *logger.Logger
+ metrics metrics.Register
pool *ants.Pool
epochTimer EpochTimerReseter
epochState EpochState
alphabetState AlphabetState
- netmapClient *nmClient.Client
- containerWrp *container.Client
-
- subnetContract util.Uint160
+ netmapClient Client
netmapSnapshot cleanupTable
- handleNewAudit event.Handler
- handleAuditSettlements event.Handler
- handleAlphabetSync event.Handler
- handleNotaryDeposit event.Handler
+ handleAlphabetSync event.Handler
+ handleNotaryDeposit event.Handler
nodeValidator NodeValidator
- notaryDisabled bool
-
nodeStateSettings state.NetworkSettings
}
// Params of the processor constructor.
Params struct {
Log *logger.Logger
+ Metrics metrics.Register
PoolSize int
- NetmapClient *nmClient.Client
+ NetmapClient Client
EpochTimer EpochTimerReseter
EpochState EpochState
AlphabetState AlphabetState
CleanupEnabled bool
CleanupThreshold uint64 // in epochs
- ContainerWrapper *container.Client
- SubnetContract *util.Uint160
- HandleAudit event.Handler
- AuditSettlementsHandler event.Handler
- AlphabetSyncHandler event.Handler
- NotaryDepositHandler event.Handler
+ AlphabetSyncHandler event.Handler
+ NotaryDepositHandler event.Handler
NodeValidator NodeValidator
- NotaryDisabled bool
-
NodeStateSettings state.NetworkSettings
}
)
const (
- newEpochNotification = "NewEpoch"
- addPeerNotification = "AddPeer"
- updatePeerStateNotification = "UpdateState"
- removeNodeNotification = "RemoveNode"
+ newEpochNotification = "NewEpoch"
)
// New creates network map contract processor instance.
@@ -124,44 +121,35 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: global state is not set")
case p.AlphabetState == nil:
return nil, errors.New("ir/netmap: global state is not set")
- case p.HandleAudit == nil:
- return nil, errors.New("ir/netmap: audit handler is not set")
- case p.AuditSettlementsHandler == nil:
- return nil, errors.New("ir/netmap: audit settlement handler is not set")
case p.AlphabetSyncHandler == nil:
return nil, errors.New("ir/netmap: alphabet sync handler is not set")
case p.NotaryDepositHandler == nil:
return nil, errors.New("ir/netmap: notary deposit handler is not set")
- case p.ContainerWrapper == nil:
- return nil, errors.New("ir/netmap: container contract wrapper is not set")
case p.NodeValidator == nil:
return nil, errors.New("ir/netmap: node validator is not set")
- case p.SubnetContract == nil:
- return nil, errors.New("ir/netmap: subnet contract script hash is not set")
case p.NodeStateSettings == nil:
return nil, errors.New("ir/netmap: node state settings is not set")
}
- p.Log.Debug("netmap worker pool", zap.Int("size", p.PoolSize))
-
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)
}
+ metricsRegister := p.Metrics
+ if metricsRegister == nil {
+ metricsRegister = metrics.DefaultRegister{}
+ }
+
return &Processor{
log: p.Log,
+ metrics: metricsRegister,
pool: pool,
epochTimer: p.EpochTimer,
epochState: p.EpochState,
alphabetState: p.AlphabetState,
netmapClient: p.NetmapClient,
- containerWrp: p.ContainerWrapper,
netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold),
- handleNewAudit: p.HandleAudit,
- subnetContract: *p.SubnetContract,
-
- handleAuditSettlements: p.AuditSettlementsHandler,
handleAlphabetSync: p.AlphabetSyncHandler,
@@ -169,84 +157,20 @@ func New(p *Params) (*Processor, error) {
nodeValidator: p.NodeValidator,
- notaryDisabled: p.NotaryDisabled,
-
nodeStateSettings: p.NodeStateSettings,
}, nil
}
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- parsers := make([]event.NotificationParserInfo, 0, 3)
-
- var p event.NotificationParserInfo
-
- // remove node from subnetwork event
- p.SetScriptHash(np.subnetContract)
- p.SetType(removeNodeNotification)
- p.SetParser(subnetEvent.ParseRemoveNode)
-
- parsers = append(parsers, p)
-
- p.SetScriptHash(np.netmapClient.ContractAddress())
-
- // new epoch event
- p.SetType(newEpochNotification)
- p.SetParser(netmapEvent.ParseNewEpoch)
- parsers = append(parsers, p)
-
- if !np.notaryDisabled {
- return parsers
- }
-
- // new peer event
- p.SetType(addPeerNotification)
- p.SetParser(netmapEvent.ParseAddPeer)
- parsers = append(parsers, p)
-
- // update peer event
- p.SetType(updatePeerStateNotification)
- p.SetParser(netmapEvent.ParseUpdatePeer)
- parsers = append(parsers, p)
-
- return parsers
-}
-
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- handlers := make([]event.NotificationHandlerInfo, 0, 3)
-
- var i event.NotificationHandlerInfo
-
- // remove node from subnetwork event
- i.SetScriptHash(np.subnetContract)
- i.SetType(removeNodeNotification)
- i.SetHandler(np.handleRemoveNode)
-
- handlers = append(handlers, i)
-
- i.SetScriptHash(np.netmapClient.ContractAddress())
-
- // new epoch handler
- i.SetType(newEpochNotification)
- i.SetHandler(np.handleNewEpoch)
- handlers = append(handlers, i)
-
- if !np.notaryDisabled {
- return handlers
+ return []event.NotificationHandlerInfo{
+ {
+ Contract: np.netmapClient.ContractAddress(),
+ Type: newEpochNotification,
+ Parser: netmapEvent.ParseNewEpoch,
+ Handlers: []event.Handler{np.handleNewEpoch},
+ },
}
-
- // new peer handler
- i.SetType(addPeerNotification)
- i.SetHandler(np.handleAddPeer)
- handlers = append(handlers, i)
-
- // update peer handler
- i.SetType(updatePeerStateNotification)
- i.SetHandler(np.handleUpdateState)
- handlers = append(handlers, i)
-
- return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
@@ -296,8 +220,3 @@ func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return hh
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (np *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
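
The setter-based `NotificationParserInfo`/`NotificationHandlerInfo` registration gives way to a declarative struct literal carrying `Contract`, `Type`, `Parser`, and `Handlers` together. A rough sketch of how a listener might consume such a slice: the dispatch loop and the simplified stand-in types are assumptions for illustration, not the real `event` package.

```go
package main

import "fmt"

// Trimmed-down stand-ins for event.NotificationHandlerInfo as declared
// in the diff; the real code keys on util.Uint160 script hashes.
type (
	Event   interface{}
	Handler func(Event)

	NotificationHandlerInfo struct {
		Contract string
		Type     string
		Parser   func(raw string) (Event, error)
		Handlers []Handler
	}
)

// dispatch routes a raw notification to every registered handler whose
// contract and type match, roughly what a listener does with the slice
// returned by ListenerNotificationHandlers.
func dispatch(infos []NotificationHandlerInfo, contract, typ, raw string) {
	for _, info := range infos {
		if info.Contract != contract || info.Type != typ {
			continue
		}
		ev, err := info.Parser(raw)
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		for _, h := range info.Handlers {
			h(ev)
		}
	}
}

func main() {
	infos := []NotificationHandlerInfo{{
		Contract: "netmap",
		Type:     "NewEpoch",
		Parser:   func(raw string) (Event, error) { return raw, nil },
		Handlers: []Handler{func(ev Event) { fmt.Println("handled:", ev) }},
	}}
	dispatch(infos, "netmap", "NewEpoch", "epoch=42")
}
```
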
diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go
new file mode 100644
index 0000000000..310f12248d
--- /dev/null
+++ b/pkg/innerring/processors/netmap/wrappers.go
@@ -0,0 +1,63 @@
+package netmap
+
+import (
+ "context"
+
+ netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+)
+
+func NewNetmapClient(netmapClient *netmapclient.Client) Client {
+ return &netmapClientWrapper{
+ netmapClient: netmapClient,
+ }
+}
+
+type netmapClientWrapper struct {
+ netmapClient *netmapclient.Client
+}
+
+func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error {
+ _, err := w.netmapClient.UpdatePeerState(ctx, p)
+ return err
+}
+
+func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+ _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...)
+ return err
+}
+
+func (w *netmapClientWrapper) ContractAddress() util.Uint160 {
+ return w.netmapClient.ContractAddress()
+}
+
+func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) {
+ return w.netmapClient.EpochDuration(ctx)
+}
+
+func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) {
+ return w.netmapClient.Morph().TxHeight(h)
+}
+
+func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) {
+ return w.netmapClient.NetMap(ctx)
+}
+
+func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
+ return w.netmapClient.NewEpoch(ctx, epoch)
+}
+
+func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
+ return w.netmapClient.Morph().IsValidScript(script, signers)
+}
+
+func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error {
+ return w.netmapClient.AddPeer(ctx, p)
+}
+
+func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
+ return w.netmapClient.Morph().NotarySignAndInvokeTX(mainTx)
+}
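
Introducing the narrow `Client` interface plus this wrapper decouples the processor from the concrete morph client, so unit tests can substitute a hand-rolled fake. A sketch of such a test double, assuming a hypothetical `netmap_test` package; the method signatures come from the `Client` interface in the diff, everything else is illustrative:

```go
package netmap_test

import (
	"context"

	procnetmap "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// fakeClient is a test double for the processor's Client interface: it
// records notary invocations so a test can assert that processAddPeer
// actually reached the contract.
type fakeClient struct {
	notaryInvokes int
	validScript   bool
}

// Compile-time check that the fake satisfies the interface from the diff.
var _ procnetmap.Client = (*fakeClient)(nil)

func (c *fakeClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
	c.notaryInvokes++
	return nil
}

func (c *fakeClient) ContractAddress() util.Uint160 { return util.Uint160{} }

func (c *fakeClient) EpochDuration(ctx context.Context) (uint64, error) { return 240, nil }

func (c *fakeClient) MorphTxHeight(h util.Uint256) (uint32, error) { return 0, nil }

func (c *fakeClient) NetMap(ctx context.Context) (*netmap.NetMap, error) {
	return &netmap.NetMap{}, nil
}

func (c *fakeClient) NewEpoch(ctx context.Context, epoch uint64) error { return nil }

func (c *fakeClient) MorphIsValidScript(script []byte, signers []transaction.Signer) (bool, error) {
	return c.validScript, nil
}

func (c *fakeClient) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
	return nil
}
```
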
diff --git a/pkg/innerring/processors/reputation/handlers.go b/pkg/innerring/processors/reputation/handlers.go
deleted file mode 100644
index 36c9579e53..0000000000
--- a/pkg/innerring/processors/reputation/handlers.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package reputation
-
-import (
- "encoding/hex"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
- "go.uber.org/zap"
-)
-
-func (rp *Processor) handlePutReputation(ev event.Event) {
- put := ev.(reputationEvent.Put)
- peerID := put.PeerID()
-
- // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- rp.log.Info("notification",
- zap.String("type", "reputation put"),
- zap.String("peer_id", hex.EncodeToString(peerID.PublicKey())))
-
- // send event to the worker pool
-
- err := rp.pool.Submit(func() { rp.processPut(&put) })
- if err != nil {
- // there system can be moved into controlled degradation stage
- rp.log.Warn("reputation worker pool drained",
- zap.Int("capacity", rp.pool.Cap()))
- }
-}
diff --git a/pkg/innerring/processors/reputation/process_put.go b/pkg/innerring/processors/reputation/process_put.go
deleted file mode 100644
index 31e93763b4..0000000000
--- a/pkg/innerring/processors/reputation/process_put.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package reputation
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
-
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
- reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-var errWrongManager = errors.New("got manager that is incorrect for peer")
-
-func (rp *Processor) processPut(e *reputationEvent.Put) {
- if !rp.alphabetState.IsAlphabet() {
- rp.log.Info("non alphabet mode, ignore reputation put notification")
- return
- }
-
- epoch := e.Epoch()
- id := e.PeerID()
- value := e.Value()
-
- // check if epoch is valid
- currentEpoch := rp.epochState.EpochCounter()
- if epoch >= currentEpoch {
- rp.log.Info("ignore reputation value",
- zap.String("reason", "invalid epoch number"),
- zap.Uint64("trust_epoch", epoch),
- zap.Uint64("local_epoch", currentEpoch))
-
- return
- }
-
- // check signature
- if !value.VerifySignature() {
- rp.log.Info("ignore reputation value",
- zap.String("reason", "invalid signature"),
- )
-
- return
- }
-
- // check if manager is correct
- if err := rp.checkManagers(epoch, value.Manager(), id); err != nil {
- rp.log.Info("ignore reputation value",
- zap.String("reason", "wrong manager"),
- zap.String("error", err.Error()))
-
- return
- }
-
- rp.approvePutReputation(e)
-}
-
-func (rp *Processor) checkManagers(e uint64, mng apireputation.PeerID, peer apireputation.PeerID) error {
- mm, err := rp.mngBuilder.BuildManagers(e, peer)
- if err != nil {
- return fmt.Errorf("could not build managers: %w", err)
- }
-
- for _, m := range mm {
- // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- if bytes.Equal(mng.PublicKey(), m.PublicKey()) {
- return nil
- }
- }
-
- return errWrongManager
-}
-
-func (rp *Processor) approvePutReputation(e *reputationEvent.Put) {
- var (
- id = e.PeerID()
- err error
- )
-
- if nr := e.NotaryRequest(); nr != nil {
- // put event was received via Notary service
- err = rp.reputationWrp.Morph().NotarySignAndInvokeTX(nr.MainTransaction)
- } else {
- args := repClient.PutPrm{}
- args.SetEpoch(e.Epoch())
- args.SetPeerID(id)
- args.SetValue(e.Value())
-
- err = rp.reputationWrp.Put(args)
- }
- if err != nil {
- // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- rp.log.Warn("can't send approval tx for reputation value",
- zap.String("peer_id", hex.EncodeToString(id.PublicKey())),
- zap.String("error", err.Error()))
- }
-}
diff --git a/pkg/innerring/processors/reputation/processor.go b/pkg/innerring/processors/reputation/processor.go
deleted file mode 100644
index 990358257d..0000000000
--- a/pkg/innerring/processors/reputation/processor.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package reputation
-
-import (
- "errors"
- "fmt"
-
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
- "github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
-)
-
-type (
- // EpochState is a callback interface for inner ring global state.
- EpochState interface {
- EpochCounter() uint64
- }
-
- // AlphabetState is a callback interface for inner ring global state.
- AlphabetState interface {
- IsAlphabet() bool
- }
-
- // Processor of events produced by reputation contract.
- Processor struct {
- log *logger.Logger
- pool *ants.Pool
-
- epochState EpochState
- alphabetState AlphabetState
-
- reputationWrp *repClient.Client
-
- mngBuilder common.ManagerBuilder
-
- notaryDisabled bool
- }
-
- // Params of the processor constructor.
- Params struct {
- Log *logger.Logger
- PoolSize int
- EpochState EpochState
- AlphabetState AlphabetState
- ReputationWrapper *repClient.Client
- ManagerBuilder common.ManagerBuilder
- NotaryDisabled bool
- }
-)
-
-const (
- putReputationNotification = "reputationPut"
-)
-
-// New creates reputation contract processor instance.
-func New(p *Params) (*Processor, error) {
- switch {
- case p.Log == nil:
- return nil, errors.New("ir/reputation: logger is not set")
- case p.EpochState == nil:
- return nil, errors.New("ir/reputation: global state is not set")
- case p.AlphabetState == nil:
- return nil, errors.New("ir/reputation: global state is not set")
- case p.ReputationWrapper == nil:
- return nil, errors.New("ir/reputation: reputation contract wrapper is not set")
- case p.ManagerBuilder == nil:
- return nil, errors.New("ir/reputation: manager builder is not set")
- }
-
- p.Log.Debug("reputation worker pool", zap.Int("size", p.PoolSize))
-
- pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
- if err != nil {
- return nil, fmt.Errorf("ir/reputation: can't create worker pool: %w", err)
- }
-
- return &Processor{
- log: p.Log,
- pool: pool,
- epochState: p.EpochState,
- alphabetState: p.AlphabetState,
- reputationWrp: p.ReputationWrapper,
- mngBuilder: p.ManagerBuilder,
- notaryDisabled: p.NotaryDisabled,
- }, nil
-}
-
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (rp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- if !rp.notaryDisabled {
- return nil
- }
-
- var parsers []event.NotificationParserInfo
-
- // put reputation event
- put := event.NotificationParserInfo{}
- put.SetType(putReputationNotification)
- put.SetScriptHash(rp.reputationWrp.ContractAddress())
- put.SetParser(reputationEvent.ParsePut)
- parsers = append(parsers, put)
-
- return parsers
-}
-
-// ListenerNotificationHandlers for the 'event.Listener' event producer.
-func (rp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- if !rp.notaryDisabled {
- return nil
- }
-
- var handlers []event.NotificationHandlerInfo
-
- // put reputation handler
- put := event.NotificationHandlerInfo{}
- put.SetType(putReputationNotification)
- put.SetScriptHash(rp.reputationWrp.ContractAddress())
- put.SetHandler(rp.handlePutReputation)
- handlers = append(handlers, put)
-
- return handlers
-}
-
-// ListenerNotaryParsers for the 'event.Listener' notary event producer.
-func (rp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
- var p event.NotaryParserInfo
-
- p.SetMempoolType(mempoolevent.TransactionAdded)
- p.SetRequestType(reputationEvent.PutNotaryEvent)
- p.SetScriptHash(rp.reputationWrp.ContractAddress())
- p.SetParser(reputationEvent.ParsePutNotary)
-
- return []event.NotaryParserInfo{p}
-}
-
-// ListenerNotaryHandlers for the 'event.Listener' notary event producer.
-func (rp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
- var h event.NotaryHandlerInfo
-
- h.SetMempoolType(mempoolevent.TransactionAdded)
- h.SetRequestType(reputationEvent.PutNotaryEvent)
- h.SetScriptHash(rp.reputationWrp.ContractAddress())
- h.SetHandler(rp.handlePutReputation)
-
- return []event.NotaryHandlerInfo{h}
-}
-
-// TimersHandlers for the 'Timers' event producer.
-func (rp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/settlement/audit/calculate.go b/pkg/innerring/processors/settlement/audit/calculate.go
deleted file mode 100644
index d819865d8e..0000000000
--- a/pkg/innerring/processors/settlement/audit/calculate.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package audit
-
-import (
- "bytes"
- "crypto/ecdsa"
- "crypto/elliptic"
- "encoding/hex"
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "go.uber.org/zap"
-)
-
-// CalculatePrm groups the required parameters of
-// Calculator.CalculateForEpoch call.
-type CalculatePrm struct {
- // Number of epoch to perform the calculation.
- Epoch uint64
-}
-
-type singleResultCtx struct {
- eAudit uint64
-
- auditResult *audit.Result
-
- log *logger.Logger
-
- txTable *common.TransferTable
-
- cnrInfo common.ContainerInfo
-
- cnrNodes []common.NodeInfo
-
- passNodes map[string]common.NodeInfo
-
- sumSGSize *big.Int
-
- auditFee *big.Int
-}
-
-var (
- bigGB = big.NewInt(1 << 30)
- bigZero = big.NewInt(0)
- bigOne = big.NewInt(1)
-)
-
-// Calculate calculates payments for audit results in a specific epoch of the network.
-// Wraps the results in a money transfer transaction and sends it to the network.
-func (c *Calculator) Calculate(p *CalculatePrm) {
- log := &logger.Logger{Logger: c.opts.log.With(
- zap.Uint64("current epoch", p.Epoch),
- )}
-
- if p.Epoch == 0 {
- log.Info("settlements are ignored for zero epoch")
- return
- }
-
- log.Info("calculate audit settlements")
-
- log.Debug("getting results for the previous epoch")
- prevEpoch := p.Epoch - 1
-
- auditResults, err := c.prm.ResultStorage.AuditResultsForEpoch(prevEpoch)
- if err != nil {
- log.Error("could not collect audit results")
- return
- } else if len(auditResults) == 0 {
- log.Debug("no audit results in previous epoch")
- return
- }
-
- auditFee, err := c.prm.AuditFeeFetcher.AuditFee()
- if err != nil {
- log.Warn("can't fetch audit fee from network config",
- zap.String("error", err.Error()))
- auditFee = 0
- }
-
- log.Debug("processing audit results",
- zap.Int("number", len(auditResults)),
- )
-
- table := common.NewTransferTable()
-
- for i := range auditResults {
- c.processResult(&singleResultCtx{
- log: log,
- auditResult: auditResults[i],
- txTable: table,
- auditFee: big.NewInt(0).SetUint64(auditFee),
- })
- }
-
- log.Debug("processing transfers")
-
- common.TransferAssets(c.prm.Exchanger, table, common.AuditSettlementDetails(prevEpoch))
-}
-
-func (c *Calculator) processResult(ctx *singleResultCtx) {
- ctx.log = &logger.Logger{Logger: ctx.log.With(
- zap.Stringer("cid", ctx.containerID()),
- zap.Uint64("audit epoch", ctx.auditResult.Epoch()),
- )}
-
- ctx.log.Debug("reading information about the container")
-
- ok := c.readContainerInfo(ctx)
- if !ok {
- return
- }
-
- ctx.log.Debug("building placement")
-
- ok = c.buildPlacement(ctx)
- if !ok {
- return
- }
-
- ctx.log.Debug("collecting passed nodes")
-
- ok = c.collectPassNodes(ctx)
- if !ok {
- return
- }
-
- ctx.log.Debug("calculating sum of the sizes of all storage groups")
-
- ok = c.sumSGSizes(ctx)
- if !ok {
- return
- }
-
- ctx.log.Debug("filling transfer table")
-
- c.fillTransferTable(ctx)
-}
-
-func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {
- cnr, ok := ctx.auditResult.Container()
- if !ok {
- ctx.log.Error("missing container in audit result")
- return false
- }
-
- var err error
-
- ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(cnr)
- if err != nil {
- ctx.log.Error("could not get container info",
- zap.String("error", err.Error()),
- )
- }
-
- return err == nil
-}
-
-func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool {
- var err error
-
- ctx.cnrNodes, err = c.prm.PlacementCalculator.ContainerNodes(ctx.auditEpoch(), ctx.containerID())
- if err != nil {
- ctx.log.Error("could not get container nodes",
- zap.String("error", err.Error()),
- )
- }
-
- empty := len(ctx.cnrNodes) == 0
- if empty {
- ctx.log.Debug("empty list of container nodes")
- }
-
- return err == nil && !empty
-}
-
-func (c *Calculator) collectPassNodes(ctx *singleResultCtx) bool {
- ctx.passNodes = make(map[string]common.NodeInfo)
-
- for _, cnrNode := range ctx.cnrNodes {
- // TODO(@cthulhu-rider): neofs-sdk-go#241 use dedicated method
- ctx.auditResult.IteratePassedStorageNodes(func(passNode []byte) bool {
- if !bytes.Equal(cnrNode.PublicKey(), passNode) {
- return true
- }
-
- failed := false
-
- ctx.auditResult.IterateFailedStorageNodes(func(failNode []byte) bool {
- failed = bytes.Equal(cnrNode.PublicKey(), failNode)
- return !failed
- })
-
- if !failed {
- ctx.passNodes[hex.EncodeToString(passNode)] = cnrNode
- }
-
- return false
- })
- }
-
- empty := len(ctx.passNodes) == 0
- if empty {
- ctx.log.Debug("none of the container nodes passed the audit")
- }
-
- return !empty
-}
-
-func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
- sumPassSGSize := uint64(0)
- fail := false
-
- var addr oid.Address
- addr.SetContainer(ctx.containerID())
-
- ctx.auditResult.IteratePassedStorageGroups(func(id oid.ID) bool {
- addr.SetObject(id)
-
- sgInfo, err := c.prm.SGStorage.SGInfo(addr)
- if err != nil {
- ctx.log.Error("could not get SG info",
- zap.String("id", id.String()),
- zap.String("error", err.Error()),
- )
-
- fail = true
-
- return false // we also can continue and calculate at least some part
- }
-
- sumPassSGSize += sgInfo.Size()
-
- return true
- })
-
- if fail {
- return false
- }
-
- if sumPassSGSize == 0 {
- ctx.log.Debug("zero sum SG size")
- return false
- }
-
- ctx.sumSGSize = big.NewInt(int64(sumPassSGSize))
-
- return true
-}
-
-func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
- cnrOwner := ctx.cnrInfo.Owner()
-
- // add txs to pay for storage node
- for k, info := range ctx.passNodes {
- ownerID, err := c.prm.AccountStorage.ResolveKey(info)
- if err != nil {
- ctx.log.Error("could not resolve public key of the storage node",
- zap.String("error", err.Error()),
- zap.String("key", k),
- )
-
- return false // we also can continue and calculate at least some part
- }
-
- price := info.Price()
-
- ctx.log.Debug("calculating storage node salary for audit (GASe-12)",
- zap.Stringer("sum SG size", ctx.sumSGSize),
- zap.Stringer("price", price),
- )
-
- fee := big.NewInt(0).Mul(price, ctx.sumSGSize)
- fee.Div(fee, bigGB)
-
- if fee.Cmp(bigZero) == 0 {
- fee.Add(fee, bigOne)
- }
-
- ctx.txTable.Transfer(&common.TransferTx{
- From: cnrOwner,
- To: *ownerID,
- Amount: fee,
- })
- }
-
- // add txs to pay inner ring node for audit result
- auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey())
- if err != nil {
- ctx.log.Error("could not parse public key of the inner ring node",
- zap.String("error", err.Error()),
- zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())),
- )
-
- return false
- }
-
- ctx.txTable.Transfer(&common.TransferTx{
- From: cnrOwner,
- To: *auditIR,
- Amount: ctx.auditFee,
- })
-
- return false
-}
-
-func (c *singleResultCtx) containerID() cid.ID {
- cnr, _ := c.auditResult.Container()
- return cnr
-}
-
-func (c *singleResultCtx) auditEpoch() uint64 {
- if c.eAudit == 0 {
- c.eAudit = c.auditResult.Epoch()
- }
-
- return c.eAudit
-}
-
-func ownerFromKey(key []byte) (*user.ID, error) {
- pubKey, err := keys.NewPublicKeyFromBytes(key, elliptic.P256())
- if err != nil {
- return nil, err
- }
-
- var id user.ID
- user.IDFromKey(&id, (ecdsa.PublicKey)(*pubKey))
-
- return &id, nil
-}
diff --git a/pkg/innerring/processors/settlement/audit/calculator.go b/pkg/innerring/processors/settlement/audit/calculator.go
deleted file mode 100644
index fb8d82071d..0000000000
--- a/pkg/innerring/processors/settlement/audit/calculator.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package audit
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Calculator represents a component for calculating payments
-// based on data audit results and sending remittances to the chain.
-type Calculator struct {
- prm *CalculatorPrm
-
- opts *options
-}
-
-// CalculatorOption is a Calculator constructor's option.
-type CalculatorOption func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOptions() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// NewCalculator creates, initializes and returns a new Calculator instance.
-func NewCalculator(p *CalculatorPrm, opts ...CalculatorOption) *Calculator {
- o := defaultOptions()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &Calculator{
- prm: p,
- opts: o,
- }
-}
-
-// WithLogger returns an option to specify the logging component.
-func WithLogger(l *logger.Logger) CalculatorOption {
- return func(o *options) {
- o.log = l
- }
-}
diff --git a/pkg/innerring/processors/settlement/audit/prm.go b/pkg/innerring/processors/settlement/audit/prm.go
deleted file mode 100644
index d357f0d4f0..0000000000
--- a/pkg/innerring/processors/settlement/audit/prm.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package audit
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// CalculatorPrm groups the parameters of Calculator's constructor.
-type CalculatorPrm struct {
- ResultStorage ResultStorage
-
- ContainerStorage common.ContainerStorage
-
- PlacementCalculator common.PlacementCalculator
-
- SGStorage SGStorage
-
- AccountStorage common.AccountStorage
-
- Exchanger common.Exchanger
-
- AuditFeeFetcher FeeFetcher
-}
-
-// ResultStorage is an interface of storage of the audit results.
-type ResultStorage interface {
- // Must return all audit results by epoch number.
- AuditResultsForEpoch(epoch uint64) ([]*audit.Result, error)
-}
-
-// SGInfo groups the data about FrostFS storage group
-// necessary for calculating audit fee.
-type SGInfo interface {
- // Must return sum size of the all group members.
- Size() uint64
-}
-
-// SGStorage is an interface of storage of the storage groups.
-type SGStorage interface {
- // Must return information about the storage group by address.
- SGInfo(oid.Address) (SGInfo, error)
-}
-
-// FeeFetcher wraps AuditFee method that returns audit fee price from
-// the network configuration.
-type FeeFetcher interface {
- AuditFee() (uint64, error)
-}
diff --git a/pkg/innerring/processors/settlement/basic/collect.go b/pkg/innerring/processors/settlement/basic/collect.go
deleted file mode 100644
index ee7354c4ff..0000000000
--- a/pkg/innerring/processors/settlement/basic/collect.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package basic
-
-import (
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "go.uber.org/zap"
-)
-
-var (
- bigGB = big.NewInt(1 << 30)
- bigZero = big.NewInt(0)
- bigOne = big.NewInt(1)
-)
-
-func (inc *IncomeSettlementContext) Collect() {
- inc.mu.Lock()
- defer inc.mu.Unlock()
-
- cachedRate, err := inc.rate.BasicRate()
- if err != nil {
- inc.log.Error("can't get basic income rate",
- zap.String("error", err.Error()))
-
- return
- }
-
- if cachedRate == 0 {
- inc.noop = true
- return
- }
-
- cnrEstimations, err := inc.estimations.Estimations(inc.epoch)
- if err != nil {
- inc.log.Error("can't fetch container size estimations",
- zap.Uint64("epoch", inc.epoch),
- zap.String("error", err.Error()))
-
- return
- }
-
- txTable := common.NewTransferTable()
-
- for i := range cnrEstimations {
- owner, err := inc.container.ContainerInfo(cnrEstimations[i].ContainerID)
- if err != nil {
- inc.log.Warn("can't fetch container info",
- zap.Uint64("epoch", inc.epoch),
- zap.Stringer("container_id", cnrEstimations[i].ContainerID),
- zap.String("error", err.Error()))
-
- continue
- }
-
- cnrNodes, err := inc.placement.ContainerNodes(inc.epoch, cnrEstimations[i].ContainerID)
- if err != nil {
- inc.log.Debug("can't fetch container info",
- zap.Uint64("epoch", inc.epoch),
- zap.Stringer("container_id", cnrEstimations[i].ContainerID),
- zap.String("error", err.Error()))
-
- continue
- }
-
- avg := inc.avgEstimation(cnrEstimations[i]) // average container size per node
- total := calculateBasicSum(avg, cachedRate, len(cnrNodes))
-
- // fill distribute asset table
- for i := range cnrNodes {
- inc.distributeTable.Put(cnrNodes[i].PublicKey(), avg)
- }
-
- txTable.Transfer(&common.TransferTx{
- From: owner.Owner(),
- To: inc.bankOwner,
- Amount: total,
- })
- }
-
- common.TransferAssets(inc.exchange, txTable, common.BasicIncomeCollectionDetails(inc.epoch))
-}
-
-// avgEstimation returns estimation value for a single container. Right now it
-// simply calculates an average of all announcements, however it can be smarter and
-// base the result on reputation of the announcers and clever math.
-func (inc *IncomeSettlementContext) avgEstimation(e *cntClient.Estimations) (avg uint64) {
- if len(e.Values) == 0 {
- return 0
- }
-
- for i := range e.Values {
- avg += e.Values[i].Size
- }
-
- return avg / uint64(len(e.Values))
-}
-
-func calculateBasicSum(size, rate uint64, ln int) *big.Int {
- bigRate := big.NewInt(int64(rate))
-
- total := size * uint64(ln)
-
- price := big.NewInt(0).SetUint64(total)
- price.Mul(price, bigRate)
- price.Div(price, bigGB)
-
- if price.Cmp(bigZero) == 0 {
- price.Add(price, bigOne)
- }
-
- return price
-}
diff --git a/pkg/innerring/processors/settlement/basic/context.go b/pkg/innerring/processors/settlement/basic/context.go
deleted file mode 100644
index 59bedf2e4b..0000000000
--- a/pkg/innerring/processors/settlement/basic/context.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package basic
-
-import (
- "math/big"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-type (
- EstimationFetcher interface {
- Estimations(uint64) ([]*container.Estimations, error)
- }
-
- RateFetcher interface {
- BasicRate() (uint64, error)
- }
-
- // BalanceFetcher uses NEP-17 compatible balance contract.
- BalanceFetcher interface {
- Balance(id user.ID) (*big.Int, error)
- }
-
- IncomeSettlementContext struct {
- mu sync.Mutex // lock to prevent collection and distribution in the same time
-
- noop bool
-
- log *logger.Logger
- epoch uint64
-
- rate RateFetcher
- estimations EstimationFetcher
- balances BalanceFetcher
- container common.ContainerStorage
- placement common.PlacementCalculator
- exchange common.Exchanger
- accounts common.AccountStorage
-
- bankOwner user.ID
-
- // this table is not thread safe, make sure you use it with mu.Lock()
- distributeTable *NodeSizeTable
- }
-
- IncomeSettlementContextPrms struct {
- Log *logger.Logger
- Epoch uint64
- Rate RateFetcher
- Estimations EstimationFetcher
- Balances BalanceFetcher
- Container common.ContainerStorage
- Placement common.PlacementCalculator
- Exchange common.Exchanger
- Accounts common.AccountStorage
- }
-)
-
-func NewIncomeSettlementContext(p *IncomeSettlementContextPrms) *IncomeSettlementContext {
- res := &IncomeSettlementContext{
- log: p.Log,
- epoch: p.Epoch,
- rate: p.Rate,
- estimations: p.Estimations,
- balances: p.Balances,
- container: p.Container,
- placement: p.Placement,
- exchange: p.Exchange,
- accounts: p.Accounts,
- distributeTable: NewNodeSizeTable(),
- }
-
- res.bankOwner.SetScriptHash(util.Uint160{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})
-
- return res
-}
diff --git a/pkg/innerring/processors/settlement/basic/distribute.go b/pkg/innerring/processors/settlement/basic/distribute.go
deleted file mode 100644
index e085f1e220..0000000000
--- a/pkg/innerring/processors/settlement/basic/distribute.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package basic
-
-import (
- "encoding/hex"
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
- "go.uber.org/zap"
-)
-
-func (inc *IncomeSettlementContext) Distribute() {
- inc.mu.Lock()
- defer inc.mu.Unlock()
-
- if inc.noop {
- return
- }
-
- txTable := common.NewTransferTable()
-
- bankBalance, err := inc.balances.Balance(inc.bankOwner)
- if err != nil {
- inc.log.Error("can't fetch balance of banking account",
- zap.String("error", err.Error()))
-
- return
- }
-
- total := inc.distributeTable.Total()
-
- inc.distributeTable.Iterate(func(key []byte, n *big.Int) {
- nodeOwner, err := inc.accounts.ResolveKey(nodeInfoWrapper(key))
- if err != nil {
- inc.log.Warn("can't transform public key to owner id",
- zap.String("public_key", hex.EncodeToString(key)),
- zap.String("error", err.Error()))
-
- return
- }
-
- txTable.Transfer(&common.TransferTx{
- From: inc.bankOwner,
- To: *nodeOwner,
- Amount: normalizedValue(n, total, bankBalance),
- })
- })
-
- common.TransferAssets(inc.exchange, txTable, common.BasicIncomeDistributionDetails(inc.epoch))
-}
-
-func normalizedValue(n, total, limit *big.Int) *big.Int {
- if limit.Cmp(bigZero) == 0 {
- return big.NewInt(0)
- }
-
- n.Mul(n, limit)
- return n.Div(n, total)
-}
diff --git a/pkg/innerring/processors/settlement/basic/distribute_test.go b/pkg/innerring/processors/settlement/basic/distribute_test.go
deleted file mode 100644
index 24eb0db3dd..0000000000
--- a/pkg/innerring/processors/settlement/basic/distribute_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package basic
-
-import (
- "math/big"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-type normalizedValueCase struct {
- name string
- n, total, limit uint64
- expected uint64
-}
-
-func TestNormalizedValues(t *testing.T) {
- testCases := []normalizedValueCase{
- {
- name: "zero limit",
- n: 50,
- total: 100,
- limit: 0,
- expected: 0,
- },
- {
- name: "scale down",
- n: 50,
- total: 100,
- limit: 10,
- expected: 5,
- },
- {
- name: "scale up",
- n: 50,
- total: 100,
- limit: 1000,
- expected: 500,
- },
- }
-
- for _, testCase := range testCases {
- testNormalizedValues(t, testCase)
- }
-}
-
-func testNormalizedValues(t *testing.T, c normalizedValueCase) {
- n := big.NewInt(0).SetUint64(c.n)
- total := big.NewInt(0).SetUint64(c.total)
- limit := big.NewInt(0).SetUint64(c.limit)
- exp := big.NewInt(0).SetUint64(c.expected)
-
- got := normalizedValue(n, total, limit)
- require.Zero(t, exp.Cmp(got), c.name)
-}
diff --git a/pkg/innerring/processors/settlement/basic/util.go b/pkg/innerring/processors/settlement/basic/util.go
deleted file mode 100644
index 258bae46f9..0000000000
--- a/pkg/innerring/processors/settlement/basic/util.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package basic
-
-import (
- "math/big"
-)
-
-// NodeSizeTable is not thread safe, make sure it is accessed with external
-// locks or in single routine.
-type NodeSizeTable struct {
- prices map[string]uint64
- total uint64
-}
-
-func (t *NodeSizeTable) Put(id []byte, avg uint64) {
- t.prices[string(id)] += avg
- t.total += avg
-}
-
-func (t *NodeSizeTable) Total() *big.Int {
- return big.NewInt(0).SetUint64(t.total)
-}
-
-func (t *NodeSizeTable) Iterate(f func([]byte, *big.Int)) {
- for k, v := range t.prices {
- n := big.NewInt(0).SetUint64(v)
- f([]byte(k), n)
- }
-}
-
-func NewNodeSizeTable() *NodeSizeTable {
- return &NodeSizeTable{
- prices: make(map[string]uint64),
- }
-}
-
-type nodeInfoWrapper []byte
-
-func (nodeInfoWrapper) Price() *big.Int {
- panic("should not be used")
-}
-
-func (n nodeInfoWrapper) PublicKey() []byte {
- return n
-}
diff --git a/pkg/innerring/processors/settlement/calls.go b/pkg/innerring/processors/settlement/calls.go
deleted file mode 100644
index b4f44543e0..0000000000
--- a/pkg/innerring/processors/settlement/calls.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package settlement
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// HandleAuditEvent catches a new AuditEvent and
-// adds AuditProcessor call to the execution queue.
-func (p *Processor) HandleAuditEvent(e event.Event) {
- ev := e.(AuditEvent)
-
- epoch := ev.Epoch()
-
- log := &logger.Logger{Logger: p.log.With(
- zap.Uint64("epoch", epoch),
- )}
-
- log.Info("new audit settlement event")
-
- if epoch == 0 {
- log.Debug("ignore genesis epoch")
- return
- }
-
- handler := &auditEventHandler{
- log: log,
- epoch: epoch,
- proc: p.auditProc,
- }
-
- err := p.pool.Submit(handler.handle)
- if err != nil {
- log.Warn("could not add handler of AuditEvent to queue",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- log.Debug("AuditEvent handling successfully scheduled")
-}
-
-func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
- ev := e.(BasicIncomeCollectEvent)
- epoch := ev.Epoch()
-
- if !p.state.IsAlphabet() {
- p.log.Info("non alphabet mode, ignore income collection event")
-
- return
- }
-
- p.log.Info("start basic income collection",
- zap.Uint64("epoch", epoch))
-
- p.contextMu.Lock()
- defer p.contextMu.Unlock()
-
- if _, ok := p.incomeContexts[epoch]; ok {
- p.log.Error("income context already exists",
- zap.Uint64("epoch", epoch))
-
- return
- }
-
- incomeCtx, err := p.basicIncome.CreateContext(epoch)
- if err != nil {
- p.log.Error("can't create income context",
- zap.String("error", err.Error()))
-
- return
- }
-
- p.incomeContexts[epoch] = incomeCtx
-
- err = p.pool.Submit(func() {
- incomeCtx.Collect()
- })
- if err != nil {
- p.log.Warn("could not add handler of basic income collection to queue",
- zap.String("error", err.Error()),
- )
-
- return
- }
-}
-
-func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
- ev := e.(BasicIncomeDistributeEvent)
- epoch := ev.Epoch()
-
- if !p.state.IsAlphabet() {
- p.log.Info("non alphabet mode, ignore income distribution event")
-
- return
- }
-
- p.log.Info("start basic income distribution",
- zap.Uint64("epoch", epoch))
-
- p.contextMu.Lock()
- defer p.contextMu.Unlock()
-
- incomeCtx, ok := p.incomeContexts[epoch]
- delete(p.incomeContexts, epoch)
-
- if !ok {
- p.log.Warn("income context distribution does not exists",
- zap.Uint64("epoch", epoch))
-
- return
- }
-
- err := p.pool.Submit(func() {
- incomeCtx.Distribute()
- })
- if err != nil {
- p.log.Warn("could not add handler of basic income distribution to queue",
- zap.String("error", err.Error()),
- )
-
- return
- }
-}
diff --git a/pkg/innerring/processors/settlement/common/details.go b/pkg/innerring/processors/settlement/common/details.go
deleted file mode 100644
index 1cf719f634..0000000000
--- a/pkg/innerring/processors/settlement/common/details.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package common
-
-import (
- "encoding/binary"
-)
-
-var (
- auditPrefix = []byte{0x40}
- basicIncomeCollectionPrefix = []byte{0x41}
- basicIncomeDistributionPrefix = []byte{0x42}
-)
-
-func AuditSettlementDetails(epoch uint64) []byte {
- return details(auditPrefix, epoch)
-}
-
-func BasicIncomeCollectionDetails(epoch uint64) []byte {
- return details(basicIncomeCollectionPrefix, epoch)
-}
-
-func BasicIncomeDistributionDetails(epoch uint64) []byte {
- return details(basicIncomeDistributionPrefix, epoch)
-}
-
-func details(prefix []byte, epoch uint64) []byte {
- prefixLen := len(prefix)
- buf := make([]byte, prefixLen+8)
-
- copy(buf, prefix)
- binary.LittleEndian.PutUint64(buf[prefixLen:], epoch)
-
- return buf
-}
diff --git a/pkg/innerring/processors/settlement/common/details_test.go b/pkg/innerring/processors/settlement/common/details_test.go
deleted file mode 100644
index 9755e6aef7..0000000000
--- a/pkg/innerring/processors/settlement/common/details_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package common
-
-import (
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestAuditSettlementDetails(t *testing.T) {
- var n uint64 = 1994 // 0x7CA
- exp := []byte{0x40, 0xCA, 0x07, 0, 0, 0, 0, 0, 0}
- got := AuditSettlementDetails(n)
- require.Equal(t, exp, got)
-}
-
-func TestBasicIncomeCollectionDetails(t *testing.T) {
- var n uint64 = 1994 // 0x7CA
- exp := []byte{0x41, 0xCA, 0x07, 0, 0, 0, 0, 0, 0}
- got := BasicIncomeCollectionDetails(n)
- require.Equal(t, exp, got)
-}
-
-func TestBasicIncomeDistributionDetails(t *testing.T) {
- var n uint64 = 1994 // 0x7CA
- exp := []byte{0x42, 0xCA, 0x07, 0, 0, 0, 0, 0, 0}
- got := BasicIncomeDistributionDetails(n)
- require.Equal(t, exp, got)
-}
diff --git a/pkg/innerring/processors/settlement/common/types.go b/pkg/innerring/processors/settlement/common/types.go
deleted file mode 100644
index 9dca0fd0da..0000000000
--- a/pkg/innerring/processors/settlement/common/types.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package common
-
-import (
- "math/big"
-
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-// NodeInfo groups the data about the storage node
-// necessary for calculating audit fees.
-type NodeInfo interface {
- // Must return storage price of the node for one epoch in GASe-12.
- Price() *big.Int
-
- // Must return public key of the node.
- PublicKey() []byte
-}
-
-// ContainerInfo groups the data about FrostFS container
-// necessary for calculating audit fee.
-type ContainerInfo interface {
- // Must return identifier of the container owner.
- Owner() user.ID
-}
-
-// ContainerStorage is an interface of
-// storage of the FrostFS containers.
-type ContainerStorage interface {
- // Must return information about the container by ID.
- ContainerInfo(cid.ID) (ContainerInfo, error)
-}
-
-// PlacementCalculator is a component interface
-// that builds placement vectors.
-type PlacementCalculator interface {
- // Must return information about the nodes from container by its ID of the given epoch.
- ContainerNodes(uint64, cid.ID) ([]NodeInfo, error)
-}
-
-// AccountStorage is an network member accounts interface.
-type AccountStorage interface {
- // Must resolve information about the storage node
- // to its ID in system.
- ResolveKey(NodeInfo) (*user.ID, error)
-}
-
-// Exchanger is an interface of monetary component.
-type Exchanger interface {
- // Must transfer amount of GASe-12 from sender to recipient.
- //
- // Amount must be positive.
- Transfer(sender, recipient user.ID, amount *big.Int, details []byte)
-}
diff --git a/pkg/innerring/processors/settlement/common/util.go b/pkg/innerring/processors/settlement/common/util.go
deleted file mode 100644
index 6f40fb577e..0000000000
--- a/pkg/innerring/processors/settlement/common/util.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package common
-
-import (
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-type TransferTable struct {
- txs map[string]map[string]*TransferTx
-}
-
-type TransferTx struct {
- From, To user.ID
-
- Amount *big.Int
-}
-
-func NewTransferTable() *TransferTable {
- return &TransferTable{
- txs: make(map[string]map[string]*TransferTx),
- }
-}
-
-func (t *TransferTable) Transfer(tx *TransferTx) {
- if tx.From.Equals(tx.To) {
- return
- }
-
- from, to := tx.From.EncodeToString(), tx.To.EncodeToString()
-
- m, ok := t.txs[from]
- if !ok {
- if m, ok = t.txs[to]; ok {
- to = from // ignore `From = To` swap because `From` doesn't require
- tx.Amount.Neg(tx.Amount)
- } else {
- m = make(map[string]*TransferTx, 1)
- t.txs[from] = m
- }
- }
-
- tgt, ok := m[to]
- if !ok {
- m[to] = tx
- return
- }
-
- tgt.Amount.Add(tgt.Amount, tx.Amount)
-}
-
-func (t *TransferTable) Iterate(f func(*TransferTx)) {
- for _, m := range t.txs {
- for _, tx := range m {
- f(tx)
- }
- }
-}
-
-func TransferAssets(e Exchanger, t *TransferTable, details []byte) {
- t.Iterate(func(tx *TransferTx) {
- sign := tx.Amount.Sign()
- if sign == 0 {
- return
- }
-
- if sign < 0 {
- tx.From, tx.To = tx.To, tx.From
- tx.Amount.Neg(tx.Amount)
- }
-
- e.Transfer(tx.From, tx.To, tx.Amount, details)
- })
-}
diff --git a/pkg/innerring/processors/settlement/deps.go b/pkg/innerring/processors/settlement/deps.go
deleted file mode 100644
index 37d7955ad4..0000000000
--- a/pkg/innerring/processors/settlement/deps.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package settlement
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic"
-)
-
-// AuditProcessor is an interface of data audit fee processor.
-type AuditProcessor interface {
- // Must process data audit conducted in epoch.
- ProcessAuditSettlements(epoch uint64)
-}
-
-// BasicIncomeInitializer is an interface of basic income context creator.
-type BasicIncomeInitializer interface {
- // Creates context that processes basic income for provided epoch.
- CreateContext(epoch uint64) (*basic.IncomeSettlementContext, error)
-}
diff --git a/pkg/innerring/processors/settlement/events.go b/pkg/innerring/processors/settlement/events.go
deleted file mode 100644
index a47a3e89b5..0000000000
--- a/pkg/innerring/processors/settlement/events.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package settlement
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-)
-
-// AuditEvent is an event of the start of
-// cash settlements for data audit.
-type AuditEvent struct {
- epoch uint64
-}
-
-type (
- BasicIncomeCollectEvent = AuditEvent
- BasicIncomeDistributeEvent = AuditEvent
-)
-
-// MorphEvent implements Neo:Morph event.
-func (e AuditEvent) MorphEvent() {}
-
-// NewAuditEvent creates new AuditEvent for epoch.
-func NewAuditEvent(epoch uint64) event.Event {
- return AuditEvent{
- epoch: epoch,
- }
-}
-
-// Epoch returns the number of the epoch
-// in which the event was generated.
-func (e AuditEvent) Epoch() uint64 {
- return e.epoch
-}
-
-// NewBasicIncomeCollectEvent for epoch.
-func NewBasicIncomeCollectEvent(epoch uint64) event.Event {
- return BasicIncomeCollectEvent{
- epoch: epoch,
- }
-}
-
-// NewBasicIncomeDistributeEvent for epoch.
-func NewBasicIncomeDistributeEvent(epoch uint64) event.Event {
- return BasicIncomeDistributeEvent{
- epoch: epoch,
- }
-}
diff --git a/pkg/innerring/processors/settlement/handlers.go b/pkg/innerring/processors/settlement/handlers.go
deleted file mode 100644
index f73b619835..0000000000
--- a/pkg/innerring/processors/settlement/handlers.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package settlement
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-
-type auditEventHandler struct {
- log *logger.Logger
-
- epoch uint64
-
- proc AuditProcessor
-}
-
-func (p *auditEventHandler) handle() {
- p.log.Info("process audit settlements")
-
- p.proc.ProcessAuditSettlements(p.epoch)
-
- p.log.Info("audit processing finished")
-}
diff --git a/pkg/innerring/processors/settlement/opts.go b/pkg/innerring/processors/settlement/opts.go
deleted file mode 100644
index b344f98d63..0000000000
--- a/pkg/innerring/processors/settlement/opts.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package settlement
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option is a Processor constructor's option.
-type Option func(*options)
-
-type options struct {
- poolSize int
-
- log *logger.Logger
-}
-
-func defaultOptions() *options {
- const poolSize = 10
-
- return &options{
- poolSize: poolSize,
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to override the component for logging.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- o.log = l
- }
-}
diff --git a/pkg/innerring/processors/settlement/processor.go b/pkg/innerring/processors/settlement/processor.go
deleted file mode 100644
index e86666d5c0..0000000000
--- a/pkg/innerring/processors/settlement/processor.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package settlement
-
-import (
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic"
- nodeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
-)
-
-type (
- // AlphabetState is a callback interface for inner ring global state.
- AlphabetState interface {
- IsAlphabet() bool
- }
-
- // Processor is an event handler for payments in the system.
- Processor struct {
- log *logger.Logger
-
- state AlphabetState
-
- pool nodeutil.WorkerPool
-
- auditProc AuditProcessor
-
- basicIncome BasicIncomeInitializer
-
- contextMu sync.Mutex
- incomeContexts map[uint64]*basic.IncomeSettlementContext
- }
-
- // Prm groups the required parameters of Processor's constructor.
- Prm struct {
- AuditProcessor AuditProcessor
- BasicIncome BasicIncomeInitializer
- State AlphabetState
- }
-)
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf("invalid parameter %s (%T):%v", n, v, v))
-}
-
-// New creates and returns a new Processor instance.
-func New(prm Prm, opts ...Option) *Processor {
- switch {
- case prm.AuditProcessor == nil:
- panicOnPrmValue("AuditProcessor", prm.AuditProcessor)
- }
-
- o := defaultOptions()
-
- for i := range opts {
- opts[i](o)
- }
-
- pool, err := ants.NewPool(o.poolSize, ants.WithNonblocking(true))
- if err != nil {
- panic(fmt.Errorf("could not create worker pool: %w", err))
- }
-
- o.log.Debug("worker pool for settlement processor successfully initialized",
- zap.Int("capacity", o.poolSize),
- )
-
- return &Processor{
- log: o.log,
- state: prm.State,
- pool: pool,
- auditProc: prm.AuditProcessor,
- basicIncome: prm.BasicIncome,
- incomeContexts: make(map[uint64]*basic.IncomeSettlementContext),
- }
-}
diff --git a/pkg/innerring/processors/subnet/common.go b/pkg/innerring/processors/subnet/common.go
deleted file mode 100644
index 2026c8641d..0000000000
--- a/pkg/innerring/processors/subnet/common.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package subnetevents
-
-import (
- "fmt"
-
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
-)
-
-// common interface of subnet notifications with subnet ID.
-type eventWithID interface {
- // ReadID reads identifier of the subnet.
- ReadID(*subnetid.ID) error
-}
-
-// an error which is returned on zero subnet operation attempt.
-type zeroSubnetOp struct {
- op string
-}
-
-func (x zeroSubnetOp) Error() string {
- return fmt.Sprintf("zero subnet %s", x.op)
-}
diff --git a/pkg/innerring/processors/subnet/common_test.go b/pkg/innerring/processors/subnet/common_test.go
deleted file mode 100644
index 23e61a44a4..0000000000
--- a/pkg/innerring/processors/subnet/common_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package subnetevents
-
-import subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
-
-type idEvent struct {
- id subnetid.ID
-
- idErr error
-}
-
-func (x idEvent) ReadID(id *subnetid.ID) error {
- if x.idErr != nil {
- return x.idErr
- }
-
- *id = x.id
-
- return nil
-}
diff --git a/pkg/innerring/processors/subnet/put.go b/pkg/innerring/processors/subnet/put.go
deleted file mode 100644
index ba1588756b..0000000000
--- a/pkg/innerring/processors/subnet/put.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package subnetevents
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet"
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-// Put represents a notification about FrostFS subnet creation.
-// Generated by a contract when intending to create a subnet.
-type Put interface {
- // Contains the ID of the subnet to be created.
- eventWithID
-
- // ReadCreator reads the user ID of the subnet creator.
- // Returns an error if the ID is missing.
- ReadCreator(id *user.ID) error
-
- // ReadInfo reads information about a subnet to be created.
- ReadInfo(info *subnet.Info) error
-}
-
-// PutValidator asserts intent to create a subnet.
-type PutValidator struct{}
-
-// errDiffOwner is returned when the subnet owners differ.
-var errDiffOwner = errors.New("diff subnet owners")
-
-// errDiffID is returned when the subnet IDs differ.
-var errDiffID = errors.New("diff subnet IDs")
-
-// Assert processes the attempt to create a subnet. It approves the creation through nil return.
-//
-// All read errors of Put are forwarded.
-//
-// It returns an error on:
-// - zero subnet creation;
-// - empty ID or different from the one wired into info;
-// - empty owner ID or different from the one wired into info.
-func (x PutValidator) Assert(event Put) error {
- var err error
-
- // read ID
- var id subnetid.ID
- if err = event.ReadID(&id); err != nil {
- return fmt.Errorf("read ID: %w", err)
- }
-
- // prevent zero subnet creation
- if subnetid.IsZero(id) {
- return zeroSubnetOp{
- op: "creation",
- }
- }
-
- // read creator's user ID in FrostFS system
- var creator user.ID
- if err = event.ReadCreator(&creator); err != nil {
- return fmt.Errorf("read creator: %w", err)
- }
-
- // read information about the subnet
- var info subnet.Info
- if err = event.ReadInfo(&info); err != nil {
- return fmt.Errorf("read info: %w", err)
- }
-
- // check if the explicit ID equals to the one from info
- if !subnet.AssertReference(info, id) {
- return errDiffID
- }
-
- // check if the explicit creator equals to the one from info
- if !subnet.AssertOwnership(info, creator) {
- return errDiffOwner
- }
-
- return nil
-}
diff --git a/pkg/innerring/processors/subnet/put_test.go b/pkg/innerring/processors/subnet/put_test.go
deleted file mode 100644
index dda6ee90a4..0000000000
--- a/pkg/innerring/processors/subnet/put_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package subnetevents
-
-import (
- "errors"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "github.com/stretchr/testify/require"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet"
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
-)
-
-type put struct {
- idEvent
-
- creator user.ID
-
- creatorErr error
-
- info subnet.Info
-
- infoErr error
-}
-
-func (x put) ReadCreator(id *user.ID) error {
- if x.creatorErr != nil {
- return x.creatorErr
- }
-
- *id = x.creator
-
- return nil
-}
-
-func (x put) ReadInfo(info *subnet.Info) error {
- if x.infoErr != nil {
- return x.infoErr
- }
-
- *info = x.info
-
- return nil
-}
-
-func TestPutValidator_Assert(t *testing.T) {
- var (
- v PutValidator
-
- e put
-
- err error
- )
-
- // read ID error
- e.idErr = errors.New("id err")
-
- err = v.Assert(e)
- require.ErrorIs(t, err, e.idErr)
-
- e.idErr = nil
-
- // zero subnet ID
- subnetid.MakeZero(&e.id)
-
- err = v.Assert(e)
- require.ErrorAs(t, err, new(zeroSubnetOp))
-
- const idNum = 13
- e.id.SetNumeric(idNum)
-
- // read creator error
- e.creatorErr = errors.New("creator err")
-
- err = v.Assert(e)
- require.ErrorIs(t, err, e.creatorErr)
-
- e.creatorErr = nil
-
- // read info error
- e.infoErr = errors.New("info err")
-
- err = v.Assert(e)
- require.ErrorIs(t, err, e.infoErr)
-
- e.infoErr = nil
-
- // diff explicit ID and the one in info
- var id2 subnetid.ID
-
- id2.SetNumeric(idNum + 1)
-
- e.info.SetID(id2)
-
- err = v.Assert(e)
- require.ErrorIs(t, err, errDiffID)
-
- e.info.SetID(e.id)
-
- // diff explicit creator and the one in info
- creator2 := *usertest.ID()
-
- e.info.SetOwner(creator2)
-
- err = v.Assert(e)
- require.ErrorIs(t, err, errDiffOwner)
-
- e.info.SetOwner(e.creator)
-
- err = v.Assert(e)
- require.NoError(t, err)
-}
diff --git a/pkg/innerring/processors/util.go b/pkg/innerring/processors/util.go
new file mode 100644
index 0000000000..364ffe25e4
--- /dev/null
+++ b/pkg/innerring/processors/util.go
@@ -0,0 +1,16 @@
+package processors
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
+ "github.com/panjf2000/ants/v2"
+)
+
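+// SubmitEvent schedules eventProcessor on the worker pool and records the
+// processing duration and success flag in the inner ring metrics under
+// eventLabel.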
+func SubmitEvent(pool *ants.Pool, metrics metrics.Register, eventLabel string, eventProcessor func() bool) error {
+ return pool.Submit(func() {
+ start := time.Now()
+ success := eventProcessor()
+ metrics.AddEvent(time.Since(start), eventLabel, success)
+ })
+}
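+
+// A minimal usage sketch; the "new_epoch" label and handleNewEpoch handler
+// are illustrative, not part of this package:
+//
+//	err := SubmitEvent(pool, irMetrics, "new_epoch", func() bool {
+//		return handleNewEpoch(ev) // success flag ends up in the metrics
+//	})
+//	if err != nil {
+//		// pools in this repo are created non-blocking, so Submit fails
+//		// when all workers are busy and the event is dropped
+//	}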
diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go
deleted file mode 100644
index 013023b09c..0000000000
--- a/pkg/innerring/rpc.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package innerring
-
-import (
- "context"
- "crypto/ecdsa"
- "fmt"
- "time"
-
- clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- storagegroup2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
- frostfsapiclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
- "go.uber.org/zap"
-)
-
-type (
- ClientCache struct {
- log *logger.Logger
- cache interface {
- Get(clientcore.NodeInfo) (clientcore.Client, error)
- CloseAll()
- }
- key *ecdsa.PrivateKey
-
- sgTimeout, headTimeout, rangeTimeout time.Duration
- }
-
- clientCacheParams struct {
- Log *logger.Logger
- Key *ecdsa.PrivateKey
-
- AllowExternal bool
-
- SGTimeout, HeadTimeout, RangeTimeout time.Duration
- }
-)
-
-func newClientCache(p *clientCacheParams) *ClientCache {
- return &ClientCache{
- log: p.Log,
- cache: cache.NewSDKClientCache(cache.ClientCacheOpts{AllowExternal: p.AllowExternal, Key: p.Key}),
- key: p.Key,
- sgTimeout: p.SGTimeout,
- headTimeout: p.HeadTimeout,
- rangeTimeout: p.RangeTimeout,
- }
-}
-
-func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.Client, error) {
- // Because cache is used by `ClientCache` exclusively,
- // client will always have valid key.
- return c.cache.Get(info)
-}
-
-// GetSG polls the container to get the object by id.
-// Returns storage groups structure from received object.
-//
-// Returns an error of type apistatus.ObjectNotFound if storage group is missing.
-func (c *ClientCache) GetSG(prm storagegroup2.GetSGPrm) (*storagegroup.StorageGroup, error) {
- var sgAddress oid.Address
- sgAddress.SetContainer(prm.CID)
- sgAddress.SetObject(prm.OID)
-
- return c.getSG(prm.Context, sgAddress, &prm.NetMap, prm.Container)
-}
-
-func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.NetMap, cn [][]netmap.NodeInfo) (*storagegroup.StorageGroup, error) {
- obj := addr.Object()
-
- nodes, err := placement.BuildObjectPlacement(nm, cn, &obj)
- if err != nil {
- return nil, fmt.Errorf("can't build object placement: %w", err)
- }
-
- var info clientcore.NodeInfo
-
- var getObjPrm frostfsapiclient.GetObjectPrm
- getObjPrm.SetAddress(addr)
-
- for _, node := range placement.FlattenNodes(nodes) {
- err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(node))
- if err != nil {
- return nil, fmt.Errorf("parse client node info: %w", err)
- }
-
- cli, err := c.getWrappedClient(info)
- if err != nil {
- c.log.Warn("can't setup remote connection",
- zap.String("error", err.Error()))
-
- continue
- }
-
- cctx, cancel := context.WithTimeout(ctx, c.sgTimeout)
- getObjPrm.SetContext(cctx)
-
- // NOTE: we use the function which does not verify object integrity (checksums, signature),
- // but it would be useful to do as part of a data audit.
- res, err := cli.GetObject(getObjPrm)
-
- cancel()
-
- if err != nil {
- c.log.Warn("can't get storage group object",
- zap.String("error", err.Error()))
-
- continue
- }
-
- var sg storagegroup.StorageGroup
-
- err = storagegroup.ReadFromObject(&sg, *res.Object())
- if err != nil {
- return nil, fmt.Errorf("can't parse storage group from a object: %w", err)
- }
-
- return &sg, nil
- }
-
- var errNotFound apistatus.ObjectNotFound
-
- return nil, errNotFound
-}
-
-// GetHeader requests node from the container under audit to return object header by id.
-func (c *ClientCache) GetHeader(prm auditor.GetHeaderPrm) (*object.Object, error) {
- var objAddress oid.Address
- objAddress.SetContainer(prm.CID)
- objAddress.SetObject(prm.OID)
-
- var info clientcore.NodeInfo
-
- err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(prm.Node))
- if err != nil {
- return nil, fmt.Errorf("parse client node info: %w", err)
- }
-
- cli, err := c.getWrappedClient(info)
- if err != nil {
- return nil, fmt.Errorf("can't setup remote connection with %s: %w", info.AddressGroup(), err)
- }
-
- cctx, cancel := context.WithTimeout(prm.Context, c.headTimeout)
-
- var obj *object.Object
-
- if prm.NodeIsRelay {
- obj, err = frostfsapiclient.GetObjectHeaderFromContainer(cctx, cli, objAddress)
- } else {
- obj, err = frostfsapiclient.GetRawObjectHeaderLocally(cctx, cli, objAddress)
- }
-
- cancel()
-
- if err != nil {
- return nil, fmt.Errorf("object head error: %w", err)
- }
-
- return obj, nil
-}
-
-// GetRangeHash requests node from the container under audit to return Tillich-Zemor hash of the
-// payload range of the object with specified identifier.
-func (c *ClientCache) GetRangeHash(prm auditor.GetRangeHashPrm) ([]byte, error) {
- var objAddress oid.Address
- objAddress.SetContainer(prm.CID)
- objAddress.SetObject(prm.OID)
-
- var info clientcore.NodeInfo
-
- err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(prm.Node))
- if err != nil {
- return nil, fmt.Errorf("parse client node info: %w", err)
- }
-
- cli, err := c.getWrappedClient(info)
- if err != nil {
- return nil, fmt.Errorf("can't setup remote connection with %s: %w", info.AddressGroup(), err)
- }
-
- cctx, cancel := context.WithTimeout(prm.Context, c.rangeTimeout)
-
- h, err := frostfsapiclient.HashObjectRange(cctx, cli, objAddress, prm.Range)
-
- cancel()
-
- if err != nil {
- return nil, fmt.Errorf("object rangehash error: %w", err)
- }
-
- return h, nil
-}
-
-func (c *ClientCache) getWrappedClient(info clientcore.NodeInfo) (frostfsapiclient.Client, error) {
- // can be also cached
- var cInternal frostfsapiclient.Client
-
- cli, err := c.Get(info)
- if err != nil {
- return cInternal, fmt.Errorf("could not get API client from cache")
- }
-
- cInternal.WrapBasicClient(cli)
- cInternal.SetPrivateKey(c.key)
-
- return cInternal, nil
-}
-
-func (c ClientCache) ListSG(dst *storagegroup2.SearchSGDst, prm storagegroup2.SearchSGPrm) error {
- cli, err := c.getWrappedClient(prm.NodeInfo)
- if err != nil {
- return fmt.Errorf("could not get API client from cache")
- }
-
- var cliPrm frostfsapiclient.SearchSGPrm
-
- cliPrm.SetContext(prm.Context)
- cliPrm.SetContainerID(prm.Container)
-
- res, err := cli.SearchSG(cliPrm)
- if err != nil {
- return err
- }
-
- dst.Objects = res.IDList()
-
- return nil
-}
diff --git a/pkg/innerring/settlement.go b/pkg/innerring/settlement.go
deleted file mode 100644
index 08e7a9f4da..0000000000
--- a/pkg/innerring/settlement.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package innerring
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
- auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit"
- balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
- containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
- containerAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- netmapAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "go.uber.org/zap"
-)
-
-const (
- auditSettlementContext = "audit"
- basicIncomeSettlementContext = "basic income"
-)
-
-type settlementDeps struct {
- log *logger.Logger
-
- cnrSrc container.Source
-
- auditClient *auditClient.Client
-
- nmClient *netmapClient.Client
-
- clientCache *ClientCache
-
- balanceClient *balanceClient.Client
-
- settlementCtx string
-}
-
-type auditSettlementDeps struct {
- settlementDeps
-}
-
-type basicIncomeSettlementDeps struct {
- settlementDeps
- cnrClient *containerClient.Client
-}
-
-type basicSettlementConstructor struct {
- dep *basicIncomeSettlementDeps
-}
-
-type auditSettlementCalculator audit.Calculator
-
-type containerWrapper containerAPI.Container
-
-type nodeInfoWrapper struct {
- ni netmapAPI.NodeInfo
-}
-
-type sgWrapper storagegroup.StorageGroup
-
-func (s *sgWrapper) Size() uint64 {
- return (*storagegroup.StorageGroup)(s).ValidationDataSize()
-}
-
-func (n nodeInfoWrapper) PublicKey() []byte {
- return n.ni.PublicKey()
-}
-
-func (n nodeInfoWrapper) Price() *big.Int {
- return big.NewInt(int64(n.ni.Price()))
-}
-
-func (c containerWrapper) Owner() user.ID {
- return (containerAPI.Container)(c).Owner()
-}
-
-func (s settlementDeps) AuditResultsForEpoch(epoch uint64) ([]*auditAPI.Result, error) {
- idList, err := s.auditClient.ListAuditResultIDByEpoch(epoch)
- if err != nil {
- return nil, fmt.Errorf("could not list audit results in sidechain: %w", err)
- }
-
- res := make([]*auditAPI.Result, 0, len(idList))
-
- for i := range idList {
- r, err := s.auditClient.GetAuditResult(idList[i])
- if err != nil {
- return nil, fmt.Errorf("could not get audit result: %w", err)
- }
-
- res = append(res, r)
- }
-
- return res, nil
-}
-
-func (s settlementDeps) ContainerInfo(cid cid.ID) (common.ContainerInfo, error) {
- cnr, err := s.cnrSrc.Get(cid)
- if err != nil {
- return nil, fmt.Errorf("could not get container from storage: %w", err)
- }
-
- return (containerWrapper)(cnr.Value), nil
-}
-
-func (s settlementDeps) buildContainer(e uint64, cid cid.ID) ([][]netmapAPI.NodeInfo, *netmapAPI.NetMap, error) {
- var (
- nm *netmapAPI.NetMap
- err error
- )
-
- if e > 0 {
- nm, err = s.nmClient.GetNetMapByEpoch(e)
- } else {
- nm, err = netmap.GetLatestNetworkMap(s.nmClient)
- }
-
- if err != nil {
- return nil, nil, fmt.Errorf("could not get network map from storage: %w", err)
- }
-
- cnr, err := s.cnrSrc.Get(cid)
- if err != nil {
- return nil, nil, fmt.Errorf("could not get container from sidechain: %w", err)
- }
-
- binCnr := make([]byte, sha256.Size)
- cid.Encode(binCnr)
-
- cn, err := nm.ContainerNodes(
- cnr.Value.PlacementPolicy(),
- binCnr, // may be replace pivot calculation to frostfs-api-go
- )
- if err != nil {
- return nil, nil, fmt.Errorf("could not calculate container nodes: %w", err)
- }
-
- return cn, nm, nil
-}
-
-func (s settlementDeps) ContainerNodes(e uint64, cid cid.ID) ([]common.NodeInfo, error) {
- cn, _, err := s.buildContainer(e, cid)
- if err != nil {
- return nil, err
- }
-
- var sz int
-
- for i := range cn {
- sz += len(cn[i])
- }
-
- res := make([]common.NodeInfo, 0, sz)
-
- for i := range cn {
- for j := range cn[i] {
- res = append(res, nodeInfoWrapper{
- ni: cn[i][j],
- })
- }
- }
-
- return res, nil
-}
-
-// SGInfo returns audit.SGInfo by object address.
-//
-// Returns an error of type apistatus.ObjectNotFound if storage group is missing.
-func (s settlementDeps) SGInfo(addr oid.Address) (audit.SGInfo, error) {
- cnr := addr.Container()
-
- cn, nm, err := s.buildContainer(0, cnr)
- if err != nil {
- return nil, err
- }
-
- sg, err := s.clientCache.getSG(context.Background(), addr, nm, cn)
- if err != nil {
- return nil, err
- }
-
- return (*sgWrapper)(sg), nil
-}
-
-func (s settlementDeps) ResolveKey(ni common.NodeInfo) (*user.ID, error) {
- pub, err := keys.NewPublicKeyFromBytes(ni.PublicKey(), elliptic.P256())
- if err != nil {
- return nil, err
- }
-
- var id user.ID
- user.IDFromKey(&id, (ecdsa.PublicKey)(*pub))
-
- return &id, nil
-}
-
-func (s settlementDeps) Transfer(sender, recipient user.ID, amount *big.Int, details []byte) {
- if s.settlementCtx == "" {
- panic("unknown settlement deps context")
- }
-
- log := s.log.With(
- zap.Stringer("sender", sender),
- zap.Stringer("recipient", recipient),
- zap.Stringer("amount (GASe-12)", amount),
- zap.String("details", hex.EncodeToString(details)),
- )
-
- if !amount.IsInt64() {
- s.log.Error("amount can not be represented as an int64")
-
- return
- }
-
- params := balanceClient.TransferPrm{
- Amount: amount.Int64(),
- From: sender,
- To: recipient,
- Details: details,
- }
-
- err := s.balanceClient.TransferX(params)
- if err != nil {
- log.Error(fmt.Sprintf("%s: could not send transfer", s.settlementCtx),
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- log.Debug(fmt.Sprintf("%s: transfer was successfully sent", s.settlementCtx))
-}
-
-func (b basicIncomeSettlementDeps) BasicRate() (uint64, error) {
- return b.nmClient.BasicIncomeRate()
-}
-
-func (b basicIncomeSettlementDeps) Estimations(epoch uint64) ([]*containerClient.Estimations, error) {
- estimationIDs, err := b.cnrClient.ListLoadEstimationsByEpoch(epoch)
- if err != nil {
- return nil, err
- }
-
- result := make([]*containerClient.Estimations, 0, len(estimationIDs))
-
- for i := range estimationIDs {
- estimation, err := b.cnrClient.GetUsedSpaceEstimations(estimationIDs[i])
- if err != nil {
- b.log.Warn("can't get used space estimation",
- zap.String("estimation_id", hex.EncodeToString(estimationIDs[i])),
- zap.String("error", err.Error()))
-
- continue
- }
-
- result = append(result, estimation)
- }
-
- return result, nil
-}
-
-func (b basicIncomeSettlementDeps) Balance(id user.ID) (*big.Int, error) {
- return b.balanceClient.BalanceOf(id)
-}
-
-func (s *auditSettlementCalculator) ProcessAuditSettlements(epoch uint64) {
- (*audit.Calculator)(s).Calculate(&audit.CalculatePrm{
- Epoch: epoch,
- })
-}
-
-func (b *basicSettlementConstructor) CreateContext(epoch uint64) (*basic.IncomeSettlementContext, error) {
- return basic.NewIncomeSettlementContext(&basic.IncomeSettlementContextPrms{
- Log: b.dep.log,
- Epoch: epoch,
- Rate: b.dep,
- Estimations: b.dep,
- Balances: b.dep,
- Container: b.dep,
- Placement: b.dep,
- Exchange: b.dep,
- Accounts: b.dep,
- }), nil
-}
diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go
index 903d9c876c..0ef771359b 100644
--- a/pkg/innerring/state.go
+++ b/pkg/innerring/state.go
@@ -1,13 +1,14 @@
package innerring
import (
+ "context"
"fmt"
"sort"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
- auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/spf13/viper"
@@ -30,8 +31,8 @@ func (s *Server) EpochCounter() uint64 {
// epoch counter.
func (s *Server) SetEpochCounter(val uint64) {
s.epochCounter.Store(val)
- if s.metrics != nil {
- s.metrics.SetEpoch(val)
+ if s.irMetrics != nil {
+ s.irMetrics.SetEpoch(val)
}
}
@@ -47,21 +48,21 @@ func (s *Server) SetEpochDuration(val uint64) {
}
// IsActive is a getter for a global active flag state.
-func (s *Server) IsActive() bool {
- return s.InnerRingIndex() >= 0
+func (s *Server) IsActive(ctx context.Context) bool {
+ return s.InnerRingIndex(ctx) >= 0
}
// IsAlphabet is a getter for a global alphabet flag state.
-func (s *Server) IsAlphabet() bool {
- return s.AlphabetIndex() >= 0
+func (s *Server) IsAlphabet(ctx context.Context) bool {
+ return s.AlphabetIndex(ctx) >= 0
}
// InnerRingIndex is a getter for a global index of node in inner ring list. Negative
// index means that node is not in the inner ring list.
-func (s *Server) InnerRingIndex() int {
- index, err := s.statusIndex.InnerRingIndex()
+func (s *Server) InnerRingIndex(ctx context.Context) int {
+ index, err := s.statusIndex.InnerRingIndex(ctx)
if err != nil {
- s.log.Error("can't get inner ring index", zap.String("error", err.Error()))
+ s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
return -1
}
@@ -70,10 +71,10 @@ func (s *Server) InnerRingIndex() int {
// InnerRingSize is a getter for a global size of inner ring list. This value
// paired with inner ring index.
-func (s *Server) InnerRingSize() int {
- size, err := s.statusIndex.InnerRingSize()
+func (s *Server) InnerRingSize(ctx context.Context) int {
+ size, err := s.statusIndex.InnerRingSize(ctx)
if err != nil {
- s.log.Error("can't get inner ring size", zap.String("error", err.Error()))
+ s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
return 0
}
@@ -82,28 +83,28 @@ func (s *Server) InnerRingSize() int {
// AlphabetIndex is a getter for a global index of node in alphabet list.
// Negative index means that node is not in the alphabet list.
-func (s *Server) AlphabetIndex() int {
- index, err := s.statusIndex.AlphabetIndex()
+func (s *Server) AlphabetIndex(ctx context.Context) int {
+ index, err := s.statusIndex.AlphabetIndex(ctx)
if err != nil {
- s.log.Error("can't get alphabet index", zap.String("error", err.Error()))
+ s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
return -1
}
return int(index)
}
-func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error {
+func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
validators := prm.Validators
- index := s.InnerRingIndex()
+ index := s.InnerRingIndex(ctx)
if s.contracts.alphabet.indexOutOfRange(index) {
- s.log.Info("ignore validator vote: node not in alphabet range")
+ s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
return nil
}
if len(validators) == 0 {
- s.log.Info("ignore validator vote: empty validators list")
+ s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
return nil
}
@@ -118,7 +119,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
)
if prm.Hash != nil {
- nonce, vub, err = s.morphClient.CalculateNonceAndVUB(*prm.Hash)
+ nonce, vub, err = s.morphClient.CalculateNonceAndVUB(prm.Hash)
if err != nil {
return fmt.Errorf("could not calculate nonce and `validUntilBlock` values: %w", err)
}
@@ -126,12 +127,12 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
}
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
- err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
+ _, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
- s.log.Warn("can't invoke vote method in alphabet contract",
+ s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
})
@@ -140,21 +141,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
// VoteForSidechainValidator calls vote method on alphabet contracts with
// the provided list of keys.
-func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error {
+func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
sort.Sort(prm.Validators)
- return s.voteForSidechainValidator(prm)
-}
-
-// WriteReport composes the audit result structure from the audit report
-// and sends it to Audit contract.
-func (s *Server) WriteReport(r *audit.Report) error {
- res := r.Result()
- res.SetAuditorKey(s.pubKey)
-
- prm := auditClient.PutPrm{}
- prm.SetResult(res)
-
- return s.auditClient.PutAuditResult(prm)
+ return s.voteForSidechainValidator(ctx, prm)
}
// ResetEpochTimer resets the block timer that produces events to update epoch
@@ -165,16 +154,27 @@ func (s *Server) ResetEpochTimer(h uint32) error {
return s.epochTimer.Reset()
}
-func (s *Server) setHealthStatus(hs control.HealthStatus) {
- s.healthStatus.Store(hs)
- if s.metrics != nil {
- s.metrics.SetHealth(int32(hs))
+func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) {
+ s.healthStatus.Store(int32(hs))
+ s.notifySystemd(ctx, hs)
+ if s.irMetrics != nil {
+ s.irMetrics.SetHealth(int32(hs))
}
}
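+
+// CompareAndSwapHealthStatus atomically replaces oldSt with newSt and, on a
+// successful swap, notifies systemd and updates the health metric.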
+func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
+ if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
+ s.notifySystemd(ctx, newSt)
+ if s.irMetrics != nil {
+ s.irMetrics.SetHealth(int32(newSt))
+ }
+ }
+ return
+}
+
// HealthStatus returns the current health status of the IR application.
func (s *Server) HealthStatus() control.HealthStatus {
- return s.healthStatus.Load().(control.HealthStatus)
+ return control.HealthStatus(s.healthStatus.Load())
}
func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, error) {
@@ -186,3 +186,23 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err
return persistStorage, nil
}
+
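+// notifySystemd reports the given health status to systemd via sd_notify;
+// it is a no-op unless s.sdNotify is set.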
+func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
+ if !s.sdNotify {
+ return
+ }
+ var err error
+ switch st {
+ case control.HealthStatus_READY:
+ err = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled)
+ case control.HealthStatus_SHUTTING_DOWN:
+ err = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled)
+ case control.HealthStatus_RECONFIGURING:
+ err = sdnotify.FlagAndStatus(sdnotify.ReloadingEnabled)
+ default:
+ err = sdnotify.Status(fmt.Sprintf("%v", st))
+ }
+ if err != nil {
+ s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
+ }
+}
diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go
new file mode 100644
index 0000000000..f60ca87c45
--- /dev/null
+++ b/pkg/innerring/state_test.go
@@ -0,0 +1,54 @@
+package innerring
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+func TestServerState(t *testing.T) {
+ keyStr := "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae"
+ commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{keyStr})
+	require.NoError(t, err, "convert string to committee public keys failed")
+ cf := &testCommiteeFetcher{
+ keys: commiteeKeys,
+ }
+
+ irKeys, err := keys.NewPublicKeysFromStrings([]string{keyStr})
+ require.NoError(t, err, "convert string to IR public keys failed")
+ irf := &testIRFetcher{
+ keys: irKeys,
+ }
+
+ key, err := keys.NewPublicKeyFromString(keyStr)
+ require.NoError(t, err, "convert string to public key failed")
+
+ srv := &Server{
+ statusIndex: newInnerRingIndexer(cf, irf, key, time.Second),
+ morphClient: &client.Client{},
+ }
+
+ var epoch uint64 = 100
+ srv.SetEpochCounter(epoch)
+ require.Equal(t, epoch, srv.EpochCounter(), "invalid epoch counter")
+
+ var epochDuration uint64 = 15
+ srv.SetEpochDuration(epochDuration)
+ require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration")
+
+ var healthStatus control.HealthStatus = control.HealthStatus_READY
+ srv.setHealthStatus(context.Background(), healthStatus)
+ require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")
+
+ require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
+ require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
+ require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
+	require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR size")
+ require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
+}
diff --git a/pkg/innerring/subnet.go b/pkg/innerring/subnet.go
deleted file mode 100644
index 5375029d47..0000000000
--- a/pkg/innerring/subnet.go
+++ /dev/null
@@ -1,354 +0,0 @@
-package innerring
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "errors"
- "fmt"
-
- irsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/subnet"
- netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet"
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- neogoutil "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
-)
-
-// IR server's component to handle Subnet contract notifications.
-type subnetHandler struct {
- workerPool util.WorkerPool
-
- morphClient morphsubnet.Client
-
- putValidator irsubnet.PutValidator
-}
-
-// configuration of subnet component.
-type subnetConfig struct {
- queueSize uint32
-}
-
-// makes IR server to catch Subnet notifications from the sidechain listener,
-// and to release the corresponding processing queue on stop.
-func (s *Server) initSubnet(cfg subnetConfig) {
- s.registerStarter(func() error {
- var err error
-
- // initialize queue for processing of the events from Subnet contract
- s.subnetHandler.workerPool, err = ants.NewPool(int(cfg.queueSize), ants.WithNonblocking(true))
- if err != nil {
- return fmt.Errorf("subnet queue initialization: %w", err)
- }
-
- // initialize morph client of Subnet contract
- clientMode := morphsubnet.NotaryAlphabet
-
- if s.sideNotaryConfig.disabled {
- clientMode = morphsubnet.NonNotary
- }
-
- var initPrm morphsubnet.InitPrm
-
- initPrm.SetBaseClient(s.morphClient)
- initPrm.SetContractAddress(s.contracts.subnet)
- initPrm.SetMode(clientMode)
-
- err = s.subnetHandler.morphClient.Init(initPrm)
- if err != nil {
- return fmt.Errorf("init morph subnet client: %w", err)
- }
-
- s.listenSubnet()
-
- return nil
- })
-
- s.registerCloser(func() error {
- s.stopSubnet()
- return nil
- })
-}
-
-// releases the Subnet contract notification processing queue.
-func (s *Server) stopSubnet() {
- s.workerPool.Release()
-}
-
-// names of listened notification events from Subnet contract.
-const (
- subnetCreateEvName = "Put"
- subnetRemoveEvName = "Delete"
- notarySubnetCreateEvName = "put"
-)
-
-// makes the IR server to listen to notifications of Subnet contract.
-// All required resources must be initialized before (initSubnet).
-// It works in one of two modes (configured): notary and non-notary.
-//
-// All handlers are executed only if the local node is an alphabet one.
-//
-// Events (notary):
-// - put (parser: subnetevents.ParseNotaryPut, handler: catchSubnetCreation);
-// - Delete (parser: subnetevents.ParseDelete, handler: catchSubnetCreation).
-//
-// Events (non-notary):
-// - Put (parser: subnetevents.ParsePut, handler: catchSubnetCreation);
-// - Delete (parser: subnetevents.ParseDelete, handler: catchSubnetCreation).
-func (s *Server) listenSubnet() {
- if s.sideNotaryConfig.disabled {
- s.listenSubnetWithoutNotary()
- return
- }
-
- var (
- parserInfo event.NotaryParserInfo
- handlerInfo event.NotaryHandlerInfo
- )
-
- parserInfo.SetScriptHash(s.contracts.subnet)
- handlerInfo.SetScriptHash(s.contracts.subnet)
-
- listenNotaryEvent := func(notifyName string, parser event.NotaryParser, handler event.Handler) {
- notifyTyp := event.NotaryTypeFromString(notifyName)
-
- parserInfo.SetMempoolType(mempoolevent.TransactionAdded)
- handlerInfo.SetMempoolType(mempoolevent.TransactionAdded)
-
- parserInfo.SetParser(parser)
- handlerInfo.SetHandler(handler)
-
- parserInfo.SetRequestType(notifyTyp)
- handlerInfo.SetRequestType(notifyTyp)
-
- s.morphListener.SetNotaryParser(parserInfo)
- s.morphListener.RegisterNotaryHandler(handlerInfo)
- }
-
- // subnet creation
- listenNotaryEvent(notarySubnetCreateEvName, subnetevents.ParseNotaryPut, s.onlyAlphabetEventHandler(s.catchSubnetCreation))
- // subnet removal
- listenNotifySubnetEvent(s, subnetRemoveEvName, subnetevents.ParseDelete, s.onlyAlphabetEventHandler(s.catchSubnetRemoval))
-}
-
-func (s *Server) listenSubnetWithoutNotary() {
- // subnet creation
- listenNotifySubnetEvent(s, subnetCreateEvName, subnetevents.ParsePut, s.onlyAlphabetEventHandler(s.catchSubnetCreation))
- // subnet removal
- listenNotifySubnetEvent(s, subnetRemoveEvName, subnetevents.ParseDelete, s.onlyAlphabetEventHandler(s.catchSubnetRemoval))
-}
-
-func listenNotifySubnetEvent(s *Server, notifyName string, parser event.NotificationParser, handler event.Handler) {
- var (
- parserInfo event.NotificationParserInfo
- handlerInfo event.NotificationHandlerInfo
- )
-
- parserInfo.SetScriptHash(s.contracts.subnet)
- handlerInfo.SetScriptHash(s.contracts.subnet)
-
- notifyTyp := event.TypeFromString(notifyName)
-
- parserInfo.SetType(notifyTyp)
- handlerInfo.SetType(notifyTyp)
-
- parserInfo.SetParser(parser)
- handlerInfo.SetHandler(handler)
-
- s.morphListener.SetNotificationParser(parserInfo)
- s.morphListener.RegisterNotificationHandler(handlerInfo)
-}
-
-// catchSubnetCreation catches event of subnet creation from listener and queues the processing.
-func (s *Server) catchSubnetCreation(e event.Event) {
- err := s.subnetHandler.workerPool.Submit(func() {
- s.handleSubnetCreation(e)
- })
- if err != nil {
- s.log.Error("subnet creation queue failure",
- zap.String("error", err.Error()),
- )
- }
-}
-
-// implements irsubnet.Put event interface required by irsubnet.PutValidator.
-type putSubnetEvent struct {
- ev subnetevents.Put
-}
-
-// ReadID unmarshals the subnet ID from a binary FrostFS API protocol's format.
-func (x putSubnetEvent) ReadID(id *subnetid.ID) error {
- return id.Unmarshal(x.ev.ID())
-}
-
-var errMissingSubnetOwner = errors.New("missing subnet owner")
-
-// ReadCreator unmarshals the subnet creator from a binary FrostFS API protocol's format.
-// Returns an error if the byte array is empty.
-func (x putSubnetEvent) ReadCreator(id *user.ID) error {
- data := x.ev.Owner()
-
- if len(data) == 0 {
- return errMissingSubnetOwner
- }
-
- key, err := keys.NewPublicKeyFromBytes(data, elliptic.P256())
- if err != nil {
- return err
- }
-
- user.IDFromKey(id, (ecdsa.PublicKey)(*key))
-
- return nil
-}
-
-// ReadInfo unmarshal the subnet info from a binary FrostFS API protocol's format.
-func (x putSubnetEvent) ReadInfo(info *subnet.Info) error {
- return info.Unmarshal(x.ev.Info())
-}
-
-// handleSubnetCreation handles an event of subnet creation parsed via subnetevents.ParsePut.
-//
-// Validates the event using irsubnet.PutValidator. Logs message about (dis)agreement.
-func (s *Server) handleSubnetCreation(e event.Event) {
- putEv := e.(subnetevents.Put) // panic occurs only if we registered handler incorrectly
-
- err := s.subnetHandler.putValidator.Assert(putSubnetEvent{
- ev: putEv,
- })
- if err != nil {
- s.log.Info("discard subnet creation",
- zap.String("reason", err.Error()),
- )
-
- return
- }
-
- notaryMainTx := putEv.NotaryMainTx()
-
- isNotary := notaryMainTx != nil
- if isNotary {
- // re-sign notary request
- err = s.morphClient.NotarySignAndInvokeTX(notaryMainTx)
- } else {
- // send new transaction
- var prm morphsubnet.PutPrm
-
- prm.SetID(putEv.ID())
- prm.SetOwner(putEv.Owner())
- prm.SetInfo(putEv.Info())
- prm.SetTxHash(putEv.TxHash())
-
- _, err = s.subnetHandler.morphClient.Put(prm)
- }
-
- if err != nil {
- s.log.Error("approve subnet creation",
- zap.Bool("notary", isNotary),
- zap.String("error", err.Error()),
- )
-
- return
- }
-}
-
-// catchSubnetRemoval catches an event of subnet removal from listener and queues the processing.
-func (s *Server) catchSubnetRemoval(e event.Event) {
- err := s.subnetHandler.workerPool.Submit(func() {
- s.handleSubnetRemoval(e)
- })
- if err != nil {
- s.log.Error("subnet removal handling failure",
- zap.String("error", err.Error()),
- )
- }
-}
-
-// handleSubnetRemoval handles event of subnet removal parsed via subnetevents.ParseDelete.
-func (s *Server) handleSubnetRemoval(e event.Event) {
- delEv := e.(subnetevents.Delete) // panic occurs only if we registered handler incorrectly
-
- // handle subnet changes in netmap
-
- candidates, err := s.netmapClient.GetCandidates()
- if err != nil {
- s.log.Error("getting netmap candidates",
- zap.Error(err),
- )
-
- return
- }
-
- var removedID subnetid.ID
- err = removedID.Unmarshal(delEv.ID())
- if err != nil {
- s.log.Error("unmarshalling removed subnet ID",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- for i := range candidates {
- s.processCandidate(delEv.TxHash(), removedID, candidates[i])
- }
-}
-
-func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.ID, c netmap.NodeInfo) {
- removeSubnet := false
- log := s.log.With(
- zap.String("public_key", netmap.StringifyPublicKey(c)),
- zap.String("removed_subnet", removedID.String()),
- )
-
- err := c.IterateSubnets(func(id subnetid.ID) error {
- if removedID.Equals(id) {
- removeSubnet = true
- return netmap.ErrRemoveSubnet
- }
-
- return nil
- })
- if err != nil {
- log.Error("iterating node's subnets", zap.Error(err))
- log.Debug("removing node from netmap candidates")
-
- var updateStatePrm netmapclient.UpdatePeerPrm
- updateStatePrm.SetKey(c.PublicKey())
- updateStatePrm.SetHash(txHash)
-
- err = s.netmapClient.UpdatePeerState(updateStatePrm)
- if err != nil {
- log.Error("removing node from candidates",
- zap.Error(err),
- )
- }
-
- return
- }
-
- // remove subnet from node's information
- // if it contains removed subnet
- if removeSubnet {
- log.Debug("removing subnet from the node")
-
- var addPeerPrm netmapclient.AddPeerPrm
- addPeerPrm.SetNodeInfo(c)
- addPeerPrm.SetHash(txHash)
-
- err = s.netmapClient.AddPeer(addPeerPrm)
- if err != nil {
- log.Error("updating subnet info",
- zap.Error(err),
- )
- }
- }
-}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go
index a49324406c..a6c40f9fa7 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go
@@ -3,11 +3,12 @@ package blobovnicza
import (
"io/fs"
"os"
+ "sync"
+ "sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.etcd.io/bbolt"
- "go.uber.org/atomic"
"go.uber.org/zap"
)
@@ -15,9 +16,13 @@ import (
type Blobovnicza struct {
cfg
- filled atomic.Uint64
+ dataSize atomic.Uint64
+ itemsCount atomic.Uint64
boltDB *bbolt.DB
+
+ opened bool
+ controlMtx sync.Mutex
}
// Option is an option of Blobovnicza's constructor.
@@ -31,6 +36,8 @@ type cfg struct {
objSizeLimit uint64
log *logger.Logger
+
+ metrics Metrics
}
type boltDBCfg struct {
@@ -51,7 +58,8 @@ func defaultCfg(c *cfg) {
},
fullSizeLimit: 1 << 30, // 1GB
objSizeLimit: 1 << 20, // 1MB
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
+ metrics: &NoopMetrics{},
}
}
@@ -102,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
// WithLogger returns an option to specify Blobovnicza's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
+ c.log = l
}
}
@@ -112,3 +120,10 @@ func WithReadOnly(ro bool) Option {
c.boltOptions.ReadOnly = ro
}
}
+
+// WithMetrics returns an option to set metrics storage.
+func WithMetrics(m Metrics) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
index 43dbe1b981..95fdd844b4 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
@@ -1,12 +1,13 @@
package blobovnicza
import (
- "errors"
- "math/rand"
+ "context"
+ "crypto/rand"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -20,7 +21,7 @@ func testPutGet(t *testing.T, blz *Blobovnicza, addr oid.Address, sz uint64, ass
var pPut PutPrm
pPut.SetAddress(addr)
pPut.SetMarshaledObject(data)
- _, err := blz.Put(pPut)
+ _, err := blz.Put(context.Background(), pPut)
if assertErrPut != nil {
require.True(t, assertErrPut(err))
} else {
@@ -39,7 +40,7 @@ func testGet(t *testing.T, blz *Blobovnicza, addr oid.Address, expObj []byte, as
pGet.SetAddress(addr)
// try to read object from Blobovnicza
- res, err := blz.Get(pGet)
+ res, err := blz.Get(context.Background(), pGet)
if assertErr != nil {
require.True(t, assertErr(err))
} else {
@@ -52,8 +53,6 @@ func testGet(t *testing.T, blz *Blobovnicza, addr oid.Address, expObj []byte, as
}
func TestBlobovnicza(t *testing.T) {
- rand.Seed(1024)
-
p := "./test_blz"
sizeLim := uint64(256 * 1 << 10) // 256KB
@@ -64,19 +63,19 @@ func TestBlobovnicza(t *testing.T) {
WithPath(p),
WithObjectSizeLimit(objSizeLim),
WithFullSizeLimit(sizeLim),
- WithLogger(test.NewLogger(false)),
+ WithLogger(test.NewLogger(t)),
)
defer os.Remove(p)
// open Blobovnicza
- require.NoError(t, blz.Open())
+ require.NoError(t, blz.Open(context.Background()))
// initialize Blobovnicza
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Init(context.Background()))
// try to read non-existent address
- testGet(t, blz, oidtest.Address(), nil, IsErrNotFound)
+ testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
filled := uint64(15 * 1 << 10)
@@ -87,21 +86,21 @@ func TestBlobovnicza(t *testing.T) {
var dPrm DeletePrm
dPrm.SetAddress(addr)
- _, err := blz.Delete(dPrm)
+ _, err := blz.Delete(context.Background(), dPrm)
require.NoError(t, err)
// should return 404
- testGet(t, blz, addr, nil, IsErrNotFound)
+ testGet(t, blz, addr, nil, client.IsErrObjectNotFound)
// fill Blobovnicza fully
for ; filled < sizeLim; filled += objSizeLim {
testPutGet(t, blz, oidtest.Address(), objSizeLim, nil, nil)
}
- // from now objects should not be saved
+	// blobovnicza accepts objects even if full
testPutGet(t, blz, oidtest.Address(), 1024, func(err error) bool {
- return errors.Is(err, ErrFull)
+ return err == nil
}, nil)
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
index 3912deac01..4947512cc7 100644
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ b/pkg/local_object_storage/blobovnicza/control.go
@@ -1,10 +1,12 @@
package blobovnicza
import (
+ "context"
+ "errors"
"fmt"
- "os"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"go.etcd.io/bbolt"
"go.uber.org/zap"
@@ -13,8 +15,16 @@ import (
// Open opens an internal database at the configured path with the configured permissions.
//
// If the database file does not exist, it will be created automatically.
-func (b *Blobovnicza) Open() error {
- b.log.Debug("creating directory for BoltDB",
+// If blobovnicza is already open, does nothing.
+func (b *Blobovnicza) Open(ctx context.Context) error {
+ b.controlMtx.Lock()
+ defer b.controlMtx.Unlock()
+
+ if b.opened {
+ return nil
+ }
+
+ b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@@ -28,12 +38,16 @@ func (b *Blobovnicza) Open() error {
}
}
- b.log.Debug("opening BoltDB",
+ b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
b.boltDB, err = bbolt.Open(b.path, b.perm, b.boltOptions)
+ if err == nil {
+ b.opened = true
+ b.metrics.IncOpenBlobovniczaCount()
+ }
return err
}
@@ -41,54 +55,129 @@ func (b *Blobovnicza) Open() error {
// Init initializes internal database structure.
//
// If Blobovnicza is already initialized, no action is taken.
-//
-// Should not be called in read-only configuration.
-func (b *Blobovnicza) Init() error {
- b.log.Debug("initializing...",
+// Blobovnicza must be open, otherwise an error is returned.
+func (b *Blobovnicza) Init(ctx context.Context) error {
+ b.controlMtx.Lock()
+ defer b.controlMtx.Unlock()
+
+ if !b.opened {
+ return errors.New("blobovnicza is not open")
+ }
+
+ b.log.Debug(ctx, logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
- if size := b.filled.Load(); size != 0 {
- b.log.Debug("already initialized", zap.Uint64("size", size))
+ size := b.dataSize.Load()
+ items := b.itemsCount.Load()
+ if size != 0 || items != 0 {
+ b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}
- err := b.boltDB.Update(func(tx *bbolt.Tx) error {
- return b.iterateBucketKeys(func(lower, upper uint64, key []byte) (bool, error) {
- // create size range bucket
+ if !b.boltOptions.ReadOnly {
+ err := b.boltDB.Update(func(tx *bbolt.Tx) error {
+ return b.iterateBucketKeys(true, func(lower, upper uint64, key []byte) (bool, error) {
+ // create size range bucket
- rangeStr := stringifyBounds(lower, upper)
- b.log.Debug("creating bucket for size range",
- zap.String("range", rangeStr))
+ rangeStr := stringifyBounds(lower, upper)
+ b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange,
+ zap.String("range", rangeStr))
- _, err := tx.CreateBucketIfNotExists(key)
- if err != nil {
- return false, fmt.Errorf("(%T) could not create bucket for bounds %s: %w",
- b, rangeStr, err)
- }
+ _, err := tx.CreateBucketIfNotExists(key)
+ if err != nil {
+ return false, fmt.Errorf("(%T) could not create bucket for bounds %s: %w",
+ b, rangeStr, err)
+ }
- return false, nil
+ return false, nil
+ })
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return b.initializeCounters(ctx)
+}
+
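+// ObjectsCount returns the number of objects stored in the blobovnicza,
+// according to the in-memory counter.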
+func (b *Blobovnicza) ObjectsCount() uint64 {
+ return b.itemsCount.Load()
+}
+
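+// initializeCounters restores the data size and items count from the meta
+// bucket; if either value is missing, it recomputes them by scanning all
+// data buckets and, when the DB is writable, persists the result back to meta.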
+func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
+ var size uint64
+ var items uint64
+ var sizeExists bool
+ var itemsCountExists bool
+
+ err := b.boltDB.View(func(tx *bbolt.Tx) error {
+ size, sizeExists = hasDataSize(tx)
+ items, itemsCountExists = hasItemsCount(tx)
+
+ if sizeExists && itemsCountExists {
+ return nil
+ }
+
+ return b.iterateAllDataBuckets(tx, func(_, _ uint64, b *bbolt.Bucket) (bool, error) {
+ return false, b.ForEach(func(k, v []byte) error {
+ size += uint64(len(k) + len(v))
+ items++
+ return nil
+ })
})
})
if err != nil {
- return err
+ return fmt.Errorf("determine DB size: %w", err)
+ }
+ if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
+ b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
+ if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
+ if err := saveDataSize(tx, size); err != nil {
+ return err
+ }
+ return saveItemsCount(tx, items)
+ }); err != nil {
+ b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
+ return fmt.Errorf("save blobovnicza's size and items count: %w", err)
+ }
+ b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}
- info, err := os.Stat(b.path)
- if err != nil {
- return fmt.Errorf("can't determine DB size: %w", err)
- }
-
- b.filled.Store(uint64(info.Size()))
- return err
+ b.dataSize.Store(size)
+ b.itemsCount.Store(items)
+ b.metrics.AddOpenBlobovniczaSize(size)
+ b.metrics.AddOpenBlobovniczaItems(items)
+ return nil
}
// Close releases all internal database resources.
-func (b *Blobovnicza) Close() error {
- b.log.Debug("closing BoltDB",
+//
+// If blobovnicza is already closed, does nothing.
+func (b *Blobovnicza) Close(ctx context.Context) error {
+ b.controlMtx.Lock()
+ defer b.controlMtx.Unlock()
+
+ if !b.opened {
+ return nil
+ }
+
+ b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
- return b.boltDB.Close()
+ if err := b.boltDB.Close(); err != nil {
+ return err
+ }
+
+ b.metrics.DecOpenBlobovniczaCount()
+ b.metrics.SubOpenBlobovniczaSize(b.dataSize.Load())
+ b.metrics.SubOpenBlobovniczaItems(b.itemsCount.Load())
+ b.dataSize.Store(0)
+ b.itemsCount.Store(0)
+
+ b.opened = false
+
+ return nil
}
diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go
index 1f885bd8e9..8f24b56756 100644
--- a/pkg/local_object_storage/blobovnicza/delete.go
+++ b/pkg/local_object_storage/blobovnicza/delete.go
@@ -1,9 +1,17 @@
package blobovnicza
import (
+ "context"
+ "errors"
+ "syscall"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -13,8 +21,7 @@ type DeletePrm struct {
}
// DeleteRes groups the resulting values of Delete operation.
-type DeleteRes struct {
-}
+type DeleteRes struct{}
// SetAddress sets the address of the requested object.
func (p *DeletePrm) SetAddress(addr oid.Address) {
@@ -29,45 +36,69 @@ func (p *DeletePrm) SetAddress(addr oid.Address) {
// Returns an error of type apistatus.ObjectNotFound if the object to be deleted is not in blobovnicza.
//
// Should not be called in read-only configuration.
-func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) {
+func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Delete",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
addrKey := addressKey(prm.addr)
- removed := false
+ found := false
+ var sizeUpperBound uint64
+ var sizeLowerBound uint64
+ var dataSize uint64
+ var recordSize uint64
err := b.boltDB.Update(func(tx *bbolt.Tx) error {
- return b.iterateBuckets(tx, func(lower, upper uint64, buck *bbolt.Bucket) (bool, error) {
+ err := b.iterateAllDataBuckets(tx, func(lower, upper uint64, buck *bbolt.Bucket) (bool, error) {
objData := buck.Get(addrKey)
if objData == nil {
// object is not in bucket => continue iterating
return false, nil
}
-
- sz := uint64(len(objData))
-
- // decrease fullness counter
- b.decSize(sz)
-
- // remove object from the bucket
- err := buck.Delete(addrKey)
-
- if err == nil {
- b.log.Debug("object was removed from bucket",
- zap.String("binary size", stringifyByteSize(sz)),
- zap.String("range", stringifyBounds(lower, upper)),
- )
- }
-
- removed = true
-
- // stop iteration
- return true, err
+ dataSize = uint64(len(objData))
+ sizeLowerBound = lower
+ sizeUpperBound = upper
+ recordSize = dataSize + uint64(len(addrKey))
+ found = true
+ return true, buck.Delete(addrKey)
})
+ if err != nil {
+ return err
+ }
+ if found {
+ return updateMeta(tx, func(count, size uint64) (uint64, uint64) {
+ if count > 0 {
+ count--
+ }
+ if size >= recordSize {
+ size -= recordSize
+ } else {
+ size = 0
+ }
+ return count, size
+ })
+ }
+ return nil
})
- if err == nil && !removed {
- var errNotFound apistatus.ObjectNotFound
+ if err == nil && !found {
+ return DeleteRes{}, new(apistatus.ObjectNotFound)
+ }
- return DeleteRes{}, errNotFound
+ if err == nil && found {
+ b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
+ zap.String("binary size", stringifyByteSize(dataSize)),
+ zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
+ )
+ b.itemDeleted(recordSize)
+ }
+
+ if errors.Is(err, syscall.ENOSPC) {
+ err = ErrNoSpace
}
return DeleteRes{}, err
diff --git a/pkg/local_object_storage/blobovnicza/errors.go b/pkg/local_object_storage/blobovnicza/errors.go
index 2b0575c923..cff8c17766 100644
--- a/pkg/local_object_storage/blobovnicza/errors.go
+++ b/pkg/local_object_storage/blobovnicza/errors.go
@@ -1,13 +1,6 @@
package blobovnicza
-import (
- "errors"
+import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-// IsErrNotFound checks if the error returned by Blobovnicza Get/Delete method
-// corresponds to the missing object.
-func IsErrNotFound(err error) bool {
- return errors.As(err, new(apistatus.ObjectNotFound))
-}
+// ErrNoSpace is returned when blobovnicza fails to perform an operation due to syscall.ENOSPC.
+var ErrNoSpace = logicerr.New("no space left on device with blobovnicza")
diff --git a/pkg/local_object_storage/blobovnicza/exists.go b/pkg/local_object_storage/blobovnicza/exists.go
index 8ac45c4aa6..f7bc84d4af 100644
--- a/pkg/local_object_storage/blobovnicza/exists.go
+++ b/pkg/local_object_storage/blobovnicza/exists.go
@@ -1,19 +1,33 @@
package blobovnicza
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Exists check if object with the specified address is stored in b.
-func (b *Blobovnicza) Exists(addr oid.Address) (bool, error) {
- var (
- exists bool
- addrKey = addressKey(addr)
- )
+func (b *Blobovnicza) Exists(ctx context.Context, addr oid.Address) (bool, error) {
+ exists := false
+
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Exists",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ addrKey := addressKey(addr)
err := b.boltDB.View(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(_ []byte, buck *bbolt.Bucket) error {
+ return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error {
+ if isNonDataBucket(bucketName) {
+ return nil
+ }
exists = buck.Get(addrKey) != nil
if exists {
return errInterruptForEach
diff --git a/pkg/local_object_storage/blobovnicza/get.go b/pkg/local_object_storage/blobovnicza/get.go
index 776f08d2b9..600323f558 100644
--- a/pkg/local_object_storage/blobovnicza/get.go
+++ b/pkg/local_object_storage/blobovnicza/get.go
@@ -1,12 +1,16 @@
package blobovnicza
import (
+ "bytes"
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// GetPrm groups the parameters of Get operation.
@@ -39,20 +43,31 @@ var errInterruptForEach = errors.New("interrupt for-each")
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is not
// presented in Blobovnicza.
-func (b *Blobovnicza) Get(prm GetPrm) (GetRes, error) {
+func (b *Blobovnicza) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Get",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
var (
data []byte
addrKey = addressKey(prm.addr)
)
if err := b.boltDB.View(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(_ []byte, buck *bbolt.Bucket) error {
+ return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error {
+ if isNonDataBucket(bucketName) {
+ return nil
+ }
+
data = buck.Get(addrKey)
if data == nil {
return nil
}
- data = slice.Copy(data)
+ data = bytes.Clone(data)
return errInterruptForEach
})
@@ -61,9 +76,7 @@ func (b *Blobovnicza) Get(prm GetPrm) (GetRes, error) {
}
if data == nil {
- var errNotFound apistatus.ObjectNotFound
-
- return GetRes{}, errNotFound
+ return GetRes{}, new(apistatus.ObjectNotFound)
}
return GetRes{
diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go
index 98097e9c81..5a382c1590 100644
--- a/pkg/local_object_storage/blobovnicza/get_test.go
+++ b/pkg/local_object_storage/blobovnicza/get_test.go
@@ -1,7 +1,7 @@
package blobovnicza
import (
- "os"
+ "context"
"path/filepath"
"testing"
@@ -14,15 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
var blz *Blobovnicza
-
- t.Cleanup(func() {
- blz.Close()
- os.RemoveAll(filename)
- })
+ defer func() { require.NoError(t, blz.Close(context.Background())) }()
fnInit := func(szLimit uint64) {
if blz != nil {
- require.NoError(t, blz.Close())
+ require.NoError(t, blz.Close(context.Background()))
}
blz = New(
@@ -30,8 +26,8 @@ func TestBlobovnicza_Get(t *testing.T) {
WithObjectSizeLimit(szLimit),
)
- require.NoError(t, blz.Open())
- require.NoError(t, blz.Init())
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
}
// initial distribution: [0:32K] (32K:64K]
@@ -40,7 +36,7 @@ func TestBlobovnicza_Get(t *testing.T) {
addr := oidtest.Address()
obj := make([]byte, firstBucketBound+1)
- exists, err := blz.Exists(addr)
+ exists, err := blz.Exists(context.Background(), addr)
require.NoError(t, err)
require.False(t, exists)
@@ -49,18 +45,18 @@ func TestBlobovnicza_Get(t *testing.T) {
prmPut.SetMarshaledObject(obj)
// place object to [32K:64K] bucket
- _, err = blz.Put(prmPut)
+ _, err = blz.Put(context.Background(), prmPut)
require.NoError(t, err)
var prmGet GetPrm
prmGet.SetAddress(addr)
checkObj := func() {
- res, err := blz.Get(prmGet)
+ res, err := blz.Get(context.Background(), prmGet)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
- exists, err := blz.Exists(addr)
+ exists, err := blz.Exists(context.Background(), addr)
require.NoError(t, err)
require.True(t, exists)
}
diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go
index 1adfacbc01..cd33b263c4 100644
--- a/pkg/local_object_storage/blobovnicza/iterate.go
+++ b/pkg/local_object_storage/blobovnicza/iterate.go
@@ -1,34 +1,44 @@
package blobovnicza
import (
+ "bytes"
+ "context"
"fmt"
+ "math"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
-func (b *Blobovnicza) iterateBuckets(tx *bbolt.Tx, f func(uint64, uint64, *bbolt.Bucket) (bool, error)) error {
- return b.iterateBucketKeys(func(lower uint64, upper uint64, key []byte) (bool, error) {
+// iterateAllDataBuckets iterates over all data buckets in the DB.
+//
+// If the maximum object size (b.objSizeLimit) has been lowered,
+// there may be more data buckets than the current object size limit implies.
+func (b *Blobovnicza) iterateAllDataBuckets(tx *bbolt.Tx, f func(uint64, uint64, *bbolt.Bucket) (bool, error)) error {
+ return b.iterateBucketKeys(false, func(lower uint64, upper uint64, key []byte) (bool, error) {
buck := tx.Bucket(key)
if buck == nil {
- // expected to happen:
- // - before initialization step (incorrect usage by design)
- // - if DB is corrupted (in future this case should be handled)
- return false, fmt.Errorf("(%T) could not get bucket %s", b, stringifyBounds(lower, upper))
+ return true, nil
}
return f(lower, upper, buck)
})
}
-func (b *Blobovnicza) iterateBucketKeys(f func(uint64, uint64, []byte) (bool, error)) error {
- return b.iterateBounds(func(lower, upper uint64) (bool, error) {
+func (b *Blobovnicza) iterateBucketKeys(useObjLimitBound bool, f func(uint64, uint64, []byte) (bool, error)) error {
+ return b.iterateBounds(useObjLimitBound, func(lower, upper uint64) (bool, error) {
return f(lower, upper, bucketKeyFromBounds(upper))
})
}
-func (b *Blobovnicza) iterateBounds(f func(uint64, uint64) (bool, error)) error {
- objLimitBound := upperPowerOfTwo(b.objSizeLimit)
+func (b *Blobovnicza) iterateBounds(useObjLimitBound bool, f func(uint64, uint64) (bool, error)) error {
+ var objLimitBound uint64 = math.MaxUint64
+ if useObjLimitBound {
+ objLimitBound = upperPowerOfTwo(b.objSizeLimit)
+ }
for upper := firstBucketBound; upper <= max(objLimitBound, firstBucketBound); upper *= 2 {
var lower uint64
@@ -47,14 +57,6 @@ func (b *Blobovnicza) iterateBounds(f func(uint64, uint64) (bool, error)) error
return nil
}
-func max(a, b uint64) uint64 {
- if a > b {
- return a
- }
-
- return b
-}
-
// IterationElement represents a unit of elements through which Iterate operation passes.
type IterationElement struct {
addr oid.Address
@@ -107,8 +109,7 @@ func (x *IteratePrm) IgnoreErrors() {
}
// IterateRes groups the resulting values of Iterate operation.
-type IterateRes struct {
-}
+type IterateRes struct{}
// Iterate goes through all stored objects, and passes IterationElement to parameterized handler until error return.
//
@@ -117,23 +118,40 @@ type IterateRes struct {
// Returns handler's errors directly. Returns nil after iterating finish.
//
// Handler should not retain object data. Handler must not be nil.
-func (b *Blobovnicza) Iterate(prm IteratePrm) (IterateRes, error) {
+func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Iterate",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ attribute.Bool("decode_addresses", prm.decodeAddresses),
+ attribute.Bool("without_data", prm.withoutData),
+ attribute.Bool("ignore_errors", prm.ignoreErrors),
+ ))
+ defer span.End()
+
var elem IterationElement
if err := b.boltDB.View(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(name []byte, buck *bbolt.Bucket) error {
+ return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error {
+ if isNonDataBucket(bucketName) {
+ return nil
+ }
return buck.ForEach(func(k, v []byte) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
if prm.decodeAddresses {
if err := addressFromKey(&elem.addr, k); err != nil {
if prm.ignoreErrors {
return nil
}
- return fmt.Errorf("could not decode address key: %w", err)
+ return fmt.Errorf("decode address key: %w", err)
}
}
if !prm.withoutData {
- elem.data = v
+ elem.data = bytes.Clone(v)
}
return prm.handler(elem)
@@ -147,7 +165,7 @@ func (b *Blobovnicza) Iterate(prm IteratePrm) (IterateRes, error) {
}
// IterateAddresses is a helper function which iterates over Blobovnicza and passes addresses of the objects to f.
-func IterateAddresses(blz *Blobovnicza, f func(oid.Address) error) error {
+func IterateAddresses(ctx context.Context, blz *Blobovnicza, f func(oid.Address) error) error {
var prm IteratePrm
prm.DecodeAddresses()
@@ -157,7 +175,7 @@ func IterateAddresses(blz *Blobovnicza, f func(oid.Address) error) error {
return f(elem.Address())
})
- _, err := blz.Iterate(prm)
+ _, err := blz.Iterate(ctx, prm)
return err
}
diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go
index 6ecb20c77f..7172747816 100644
--- a/pkg/local_object_storage/blobovnicza/iterate_test.go
+++ b/pkg/local_object_storage/blobovnicza/iterate_test.go
@@ -1,12 +1,13 @@
package blobovnicza
import (
+ "bytes"
+ "context"
"errors"
"path/filepath"
"testing"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@@ -14,12 +15,12 @@ import (
func TestBlobovniczaIterate(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
b := New(WithPath(filename))
- require.NoError(t, b.Open())
- require.NoError(t, b.Init())
+ require.NoError(t, b.Open(context.Background()))
+ require.NoError(t, b.Init(context.Background()))
data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}}
addr := oidtest.Address()
- _, err := b.Put(PutPrm{addr: addr, objData: data[0]})
+ _, err := b.Put(context.Background(), PutPrm{addr: addr, objData: data[0]})
require.NoError(t, err)
require.NoError(t, b.boltDB.Update(func(tx *bbolt.Tx) error {
@@ -29,26 +30,26 @@ func TestBlobovniczaIterate(t *testing.T) {
seen := make([][]byte, 0, 2)
inc := func(e IterationElement) error {
- seen = append(seen, slice.Copy(e.data))
+ seen = append(seen, bytes.Clone(e.data))
return nil
}
- _, err = b.Iterate(IteratePrm{handler: inc})
+ _, err = b.Iterate(context.Background(), IteratePrm{handler: inc})
require.NoError(t, err)
require.ElementsMatch(t, seen, data)
seen = seen[:0]
- _, err = b.Iterate(IteratePrm{handler: inc, decodeAddresses: true})
+ _, err = b.Iterate(context.Background(), IteratePrm{handler: inc, decodeAddresses: true})
require.Error(t, err)
seen = seen[:0]
- _, err = b.Iterate(IteratePrm{handler: inc, decodeAddresses: true, ignoreErrors: true})
+ _, err = b.Iterate(context.Background(), IteratePrm{handler: inc, decodeAddresses: true, ignoreErrors: true})
require.NoError(t, err)
require.ElementsMatch(t, seen, data[:1])
seen = seen[:0]
expectedErr := errors.New("stop iteration")
- _, err = b.Iterate(IteratePrm{
+ _, err = b.Iterate(context.Background(), IteratePrm{
decodeAddresses: true,
handler: func(IterationElement) error { return expectedErr },
ignoreErrors: true,
diff --git a/pkg/local_object_storage/blobovnicza/meta.go b/pkg/local_object_storage/blobovnicza/meta.go
new file mode 100644
index 0000000000..3316d4666f
--- /dev/null
+++ b/pkg/local_object_storage/blobovnicza/meta.go
@@ -0,0 +1,104 @@
+package blobovnicza
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "go.etcd.io/bbolt"
+)
+
+const (
+ dataSizeAndItemsCountBufLength = 8
+)
+
+var (
+ metaBucketName = []byte("META")
+ dataSizeKey = []byte("data_size")
+ itemsCountKey = []byte("items_count")
+)
+
+func isNonDataBucket(bucketName []byte) bool {
+ return bytes.Equal(bucketName, incompletedMoveBucketName) || bytes.Equal(bucketName, metaBucketName)
+}
+
+func hasDataSize(tx *bbolt.Tx) (uint64, bool) {
+ b := tx.Bucket(metaBucketName)
+ if b == nil {
+ return 0, false
+ }
+ v := b.Get(dataSizeKey)
+ if v == nil {
+ return 0, false
+ }
+ if len(v) != dataSizeAndItemsCountBufLength {
+ return 0, false
+ }
+ return binary.LittleEndian.Uint64(v), true
+}
+
+func hasItemsCount(tx *bbolt.Tx) (uint64, bool) {
+ b := tx.Bucket(metaBucketName)
+ if b == nil {
+ return 0, false
+ }
+ v := b.Get(itemsCountKey)
+ if v == nil {
+ return 0, false
+ }
+ if len(v) != dataSizeAndItemsCountBufLength {
+ return 0, false
+ }
+ return binary.LittleEndian.Uint64(v), true
+}
+
+func saveDataSize(tx *bbolt.Tx, size uint64) error {
+ b, err := tx.CreateBucketIfNotExists(metaBucketName)
+ if err != nil {
+ return err
+ }
+ buf := make([]byte, dataSizeAndItemsCountBufLength)
+ binary.LittleEndian.PutUint64(buf, size)
+ return b.Put(dataSizeKey, buf)
+}
+
+func saveItemsCount(tx *bbolt.Tx, count uint64) error {
+ b, err := tx.CreateBucketIfNotExists(metaBucketName)
+ if err != nil {
+ return err
+ }
+ buf := make([]byte, dataSizeAndItemsCountBufLength)
+ binary.LittleEndian.PutUint64(buf, count)
+ return b.Put(itemsCountKey, buf)
+}
+
+func updateMeta(tx *bbolt.Tx, updateValues func(count, size uint64) (uint64, uint64)) error {
+ b, err := tx.CreateBucketIfNotExists(metaBucketName)
+ if err != nil {
+ return err
+ }
+
+ var count uint64
+ var size uint64
+
+ v := b.Get(itemsCountKey)
+ if v != nil {
+ count = binary.LittleEndian.Uint64(v)
+ }
+
+ v = b.Get(dataSizeKey)
+ if v != nil {
+ size = binary.LittleEndian.Uint64(v)
+ }
+
+ count, size = updateValues(count, size)
+
+ sizeBuf := make([]byte, dataSizeAndItemsCountBufLength)
+ binary.LittleEndian.PutUint64(sizeBuf, size)
+ if err := b.Put(dataSizeKey, sizeBuf); err != nil {
+ return err
+ }
+
+ countBuf := make([]byte, dataSizeAndItemsCountBufLength)
+ binary.LittleEndian.PutUint64(countBuf, count)
+ return b.Put(itemsCountKey, countBuf)
+}
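Both counters live in the META bucket as 8-byte little-endian values. A usage sketch of the accessors above (readCounters is illustrative and would sit in the same package, since the accessors are unexported):

    // Report both counters; ok stays false until Init has persisted them.
    func readCounters(db *bbolt.DB) (count, size uint64, ok bool, err error) {
        err = db.View(func(tx *bbolt.Tx) error {
            var countOK, sizeOK bool
            count, countOK = hasItemsCount(tx)
            size, sizeOK = hasDataSize(tx)
            ok = countOK && sizeOK
            return nil
        })
        return count, size, ok, err
    }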
diff --git a/pkg/local_object_storage/blobovnicza/metrics.go b/pkg/local_object_storage/blobovnicza/metrics.go
new file mode 100644
index 0000000000..37352b083f
--- /dev/null
+++ b/pkg/local_object_storage/blobovnicza/metrics.go
@@ -0,0 +1,21 @@
+package blobovnicza
+
+type Metrics interface {
+ IncOpenBlobovniczaCount()
+ DecOpenBlobovniczaCount()
+
+ AddOpenBlobovniczaSize(size uint64)
+ SubOpenBlobovniczaSize(size uint64)
+
+ AddOpenBlobovniczaItems(items uint64)
+ SubOpenBlobovniczaItems(items uint64)
+}
+
+type NoopMetrics struct{}
+
+func (m *NoopMetrics) IncOpenBlobovniczaCount() {}
+func (m *NoopMetrics) DecOpenBlobovniczaCount() {}
+func (m *NoopMetrics) AddOpenBlobovniczaSize(uint64) {}
+func (m *NoopMetrics) SubOpenBlobovniczaSize(uint64) {}
+func (m *NoopMetrics) AddOpenBlobovniczaItems(uint64) {}
+func (m *NoopMetrics) SubOpenBlobovniczaItems(uint64) {}
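Implementations other than NoopMetrics are expected to be safe for concurrent use; a minimal sketch backed by atomic counters (purely illustrative, assuming import "sync/atomic"):

    type atomicMetrics struct {
        open, size, items atomic.Int64
    }

    func (m *atomicMetrics) IncOpenBlobovniczaCount()             { m.open.Add(1) }
    func (m *atomicMetrics) DecOpenBlobovniczaCount()             { m.open.Add(-1) }
    func (m *atomicMetrics) AddOpenBlobovniczaSize(size uint64)   { m.size.Add(int64(size)) }
    func (m *atomicMetrics) SubOpenBlobovniczaSize(size uint64)   { m.size.Add(-int64(size)) }
    func (m *atomicMetrics) AddOpenBlobovniczaItems(items uint64) { m.items.Add(int64(items)) }
    func (m *atomicMetrics) SubOpenBlobovniczaItems(items uint64) { m.items.Add(-int64(items)) }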
diff --git a/pkg/local_object_storage/blobovnicza/move.go b/pkg/local_object_storage/blobovnicza/move.go
new file mode 100644
index 0000000000..420e22a487
--- /dev/null
+++ b/pkg/local_object_storage/blobovnicza/move.go
@@ -0,0 +1,119 @@
+package blobovnicza
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "syscall"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+var incompletedMoveBucketName = []byte("INCOMPLETED_MOVE")
+
+type MoveInfo struct {
+ Address oid.Address
+ TargetStorageID []byte
+}
+
+func (b *Blobovnicza) PutMoveInfo(ctx context.Context, prm MoveInfo) error {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.PutMoveInfo",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("target_storage_id", string(prm.TargetStorageID)),
+ ))
+ defer span.End()
+
+ key := addressKey(prm.Address)
+
+ err := b.boltDB.Update(func(tx *bbolt.Tx) error {
+ bucket, err := tx.CreateBucketIfNotExists(incompletedMoveBucketName)
+ if err != nil {
+ return err
+ }
+
+ if err := bucket.Put(key, prm.TargetStorageID); err != nil {
+ return fmt.Errorf("(%T) failed to save move info: %w", b, err)
+ }
+
+ return nil
+ })
+
+ if errors.Is(err, syscall.ENOSPC) {
+ err = ErrNoSpace
+ }
+ return err
+}
+
+func (b *Blobovnicza) DropMoveInfo(ctx context.Context, address oid.Address) error {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.DropMoveInfo",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ attribute.String("address", address.EncodeToString()),
+ ))
+ defer span.End()
+
+ key := addressKey(address)
+
+ err := b.boltDB.Update(func(tx *bbolt.Tx) error {
+ bucket := tx.Bucket(incompletedMoveBucketName)
+ if bucket == nil {
+ return nil
+ }
+
+ if err := bucket.Delete(key); err != nil {
+ return fmt.Errorf("(%T) failed to drop move info: %w", b, err)
+ }
+
+ c := bucket.Cursor()
+ k, v := c.First()
+ bucketEmpty := k == nil && v == nil
+ if bucketEmpty {
+ return tx.DeleteBucket(incompletedMoveBucketName)
+ }
+
+ return nil
+ })
+ if errors.Is(err, syscall.ENOSPC) {
+ err = ErrNoSpace
+ }
+ return err
+}
+
+func (b *Blobovnicza) ListMoveInfo(ctx context.Context) ([]MoveInfo, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.ListMoveInfo",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ ))
+ defer span.End()
+
+ var result []MoveInfo
+ if err := b.boltDB.View(func(tx *bbolt.Tx) error {
+ bucket := tx.Bucket(incompletedMoveBucketName)
+ if bucket == nil {
+ return nil
+ }
+ return bucket.ForEach(func(k, v []byte) error {
+ var addr oid.Address
+ storageID := make([]byte, len(v))
+ if err := addressFromKey(&addr, k); err != nil {
+ return err
+ }
+ copy(storageID, v)
+ result = append(result, MoveInfo{
+ Address: addr,
+ TargetStorageID: storageID,
+ })
+ return nil
+ })
+ }); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
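The move-info records act as a write-ahead marker for object migration: the mover persists the target before copying and clears the marker only after the copy is durable, so a crash in between leaves a record that Init later picks up via ListMoveInfo. A hedged sketch of that protocol (moveObject and the copyToTarget callback are hypothetical):

    func moveObject(ctx context.Context, src *blobovnicza.Blobovnicza, addr oid.Address,
        targetID []byte, copyToTarget func(context.Context) error) error {
        // 1. persist the marker first: a crash after this point leaves a record
        //    that protects the object from deletion until the move completes
        if err := src.PutMoveInfo(ctx, blobovnicza.MoveInfo{Address: addr, TargetStorageID: targetID}); err != nil {
            return err
        }
        // 2. perform the actual copy (details out of scope here)
        if err := copyToTarget(ctx); err != nil {
            return err
        }
        // 3. only now drop the marker; the move is complete
        return src.DropMoveInfo(ctx, addr)
    }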
diff --git a/pkg/local_object_storage/blobovnicza/put.go b/pkg/local_object_storage/blobovnicza/put.go
index 37ed57e1cc..ff223ba365 100644
--- a/pkg/local_object_storage/blobovnicza/put.go
+++ b/pkg/local_object_storage/blobovnicza/put.go
@@ -1,11 +1,17 @@
package blobovnicza
import (
+ "context"
+ "errors"
"fmt"
+ "syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// PutPrm groups the parameters of Put operation.
@@ -13,15 +19,12 @@ type PutPrm struct {
addr oid.Address
objData []byte
+
+ force bool
}
// PutRes groups the resulting values of Put operation.
-type PutRes struct {
-}
-
-// ErrFull is returned when trying to save an
-// object to a filled blobovnicza.
-var ErrFull = logicerr.New("blobovnicza is full")
+type PutRes struct{}
// SetAddress sets the address of the saving object.
func (p *PutPrm) SetAddress(addr oid.Address) {
@@ -33,6 +36,11 @@ func (p *PutPrm) SetMarshaledObject(data []byte) {
p.objData = data
}
+// SetForce sets the force option: if enabled, Put creates the missing size bucket instead of failing.
+func (p *PutPrm) SetForce(f bool) {
+ p.force = f
+}
+
// Put saves an object in Blobovnicza.
//
// If binary representation of the object is not set,
@@ -47,22 +55,35 @@ func (p *PutPrm) SetMarshaledObject(data []byte) {
// Returns ErrFull if blobovnicza is filled.
//
// Should not be called in read-only configuration.
-func (b *Blobovnicza) Put(prm PutPrm) (PutRes, error) {
+func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Put",
+ trace.WithAttributes(
+ attribute.String("path", b.path),
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Int("size", len(prm.objData)),
+ ))
+ defer span.End()
+
sz := uint64(len(prm.objData))
bucketName := bucketForSize(sz)
key := addressKey(prm.addr)
+ recordSize := sz + uint64(len(key))
err := b.boltDB.Batch(func(tx *bbolt.Tx) error {
- if b.full() {
- return ErrFull
- }
-
buck := tx.Bucket(bucketName)
if buck == nil {
// expected to happen:
// - before initialization step (incorrect usage by design)
// - if DB is corrupted (in future this case should be handled)
- return logicerr.Wrap(fmt.Errorf("(%T) bucket for size %d not created", b, sz))
+ // - blobovnicza's object size limit was changed before rebuild (handled if the prm.force flag is specified)
+ if !prm.force {
+ return logicerr.Wrap(fmt.Errorf("(%T) bucket for size %d not created", b, sz))
+ }
+ var err error
+ buck, err = tx.CreateBucket(bucketName)
+ if err != nil {
+ return fmt.Errorf("(%T) failed to create bucket for size %d: %w", b, sz, err)
+ }
}
// save the object in bucket
@@ -70,10 +91,14 @@ func (b *Blobovnicza) Put(prm PutPrm) (PutRes, error) {
return fmt.Errorf("(%T) could not save object in bucket: %w", b, err)
}
- return nil
+ return updateMeta(tx, func(count, size uint64) (uint64, uint64) {
+ return count + 1, size + recordSize
+ })
})
if err == nil {
- b.incSize(sz)
+ b.itemAdded(recordSize)
+ } else if errors.Is(err, syscall.ENOSPC) {
+ err = ErrNoSpace
}
return PutRes{}, err
diff --git a/pkg/local_object_storage/blobovnicza/sizes.go b/pkg/local_object_storage/blobovnicza/sizes.go
index 5b89760c6f..9bbed0db58 100644
--- a/pkg/local_object_storage/blobovnicza/sizes.go
+++ b/pkg/local_object_storage/blobovnicza/sizes.go
@@ -3,6 +3,8 @@ package blobovnicza
import (
"encoding/binary"
"fmt"
+ "math"
+ "math/bits"
"strconv"
)
@@ -31,21 +33,31 @@ func bucketForSize(sz uint64) []byte {
return bucketKeyFromBounds(upperPowerOfTwo(sz))
}
-func upperPowerOfTwo(v uint64) (upperBound uint64) {
- for upperBound = firstBucketBound; upperBound < v; upperBound *= 2 {
+func upperPowerOfTwo(v uint64) uint64 {
+ if v <= firstBucketBound {
+ return firstBucketBound
}
-
- return
+ return 1 << bits.Len64(v-1)
}
-func (b *Blobovnicza) incSize(sz uint64) {
- b.filled.Add(sz)
+func (b *Blobovnicza) itemAdded(itemSize uint64) {
+ b.dataSize.Add(itemSize)
+ b.itemsCount.Add(1)
+ b.metrics.AddOpenBlobovniczaSize(itemSize)
+ b.metrics.AddOpenBlobovniczaItems(1)
}
-func (b *Blobovnicza) decSize(sz uint64) {
- b.filled.Sub(sz)
+func (b *Blobovnicza) itemDeleted(itemSize uint64) {
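+ // Add(^(itemSize-1)) == Add(-itemSize) and Add(math.MaxUint64) == Add(-1)
+ // in two's complement, so both counters decrement atomically without CAS.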
+ b.dataSize.Add(^(itemSize - 1))
+ b.itemsCount.Add(math.MaxUint64)
+ b.metrics.SubOpenBlobovniczaSize(itemSize)
+ b.metrics.SubOpenBlobovniczaItems(1)
}
-func (b *Blobovnicza) full() bool {
- return b.filled.Load() >= b.fullSizeLimit
+func (b *Blobovnicza) IsFull() bool {
+ return b.dataSize.Load() >= b.fullSizeLimit
+}
+
+func (b *Blobovnicza) FillPercent() int {
+ return int(100.0 * (float64(b.dataSize.Load()) / float64(b.fullSizeLimit)))
}
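As a worked example of the bound computation above, assuming firstBucketBound == 32 KiB (the distribution used in this package's tests): values at or below the first bound map to it, and anything above rounds up to the next power of two.

    upperPowerOfTwo(32 * 1024)   // 32768  -> bucket (0:32K]
    upperPowerOfTwo(32*1024 + 1) // 65536  -> bucket (32K:64K]
    upperPowerOfTwo(65 * 1024)   // 131072 -> bucket (64K:128K]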
diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go
index c218dc6388..d582fc5e49 100644
--- a/pkg/local_object_storage/blobovnicza/sizes_test.go
+++ b/pkg/local_object_storage/blobovnicza/sizes_test.go
@@ -1,6 +1,7 @@
package blobovnicza
import (
+ "fmt"
"testing"
"github.com/stretchr/testify/require"
@@ -33,6 +34,17 @@ func TestSizes(t *testing.T) {
upperBound: 4 * firstBucketBound,
},
} {
- require.Equal(t, bucketKeyFromBounds(item.upperBound), bucketForSize(item.sz))
+ key := bucketForSize(item.sz)
+ require.Equal(t, bucketKeyFromBounds(item.upperBound), key)
+ }
+}
+
+func BenchmarkUpperBound(b *testing.B) {
+ for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} {
+ b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
+ for range b.N {
+ _ = upperPowerOfTwo(size)
+ }
+ })
}
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
new file mode 100644
index 0000000000..dbaa7387a1
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
@@ -0,0 +1,209 @@
+package blobovniczatree
+
+import (
+ "context"
+ "path/filepath"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+)
+
+type activeDB struct {
+ blz *blobovnicza.Blobovnicza
+ shDB *sharedDB
+}
+
+func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
+ return db.blz
+}
+
+func (db *activeDB) Close(ctx context.Context) {
+ db.shDB.Close(ctx)
+}
+
+func (db *activeDB) SystemPath() string {
+ return db.shDB.SystemPath()
+}
+
+// activeDBManager manages active blobovnicza instances (that is, those that are being used for Put).
+//
+// Uses dbManager for opening/closing sharedDB instances.
+// Stores a reference to an open active sharedDB, so dbManager does not close it.
+// When changing the active sharedDB, releases the reference to the previous active sharedDB.
+type activeDBManager struct {
+ levelToActiveDBGuard *sync.RWMutex
+ levelToActiveDB map[string]*sharedDB
+ levelLock *utilSync.KeyLocker[string]
+ closed bool
+
+ dbManager *dbManager
+ rootPath string
+}
+
+func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager {
+ return &activeDBManager{
+ levelToActiveDBGuard: &sync.RWMutex{},
+ levelToActiveDB: make(map[string]*sharedDB),
+ levelLock: utilSync.NewKeyLocker[string](),
+
+ dbManager: dbManager,
+ rootPath: rootPath,
+ }
+}
+
+// GetOpenedActiveDBForLevel returns the active DB for the given level.
+// The returned DB must be closed after use.
+func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) {
+ activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath)
+ if err != nil {
+ return nil, err
+ }
+ if activeDB != nil {
+ return activeDB, nil
+ }
+
+ return m.updateAndGetActive(ctx, lvlPath)
+}
+
+func (m *activeDBManager) Open() {
+ m.levelToActiveDBGuard.Lock()
+ defer m.levelToActiveDBGuard.Unlock()
+
+ m.closed = false
+}
+
+func (m *activeDBManager) Close(ctx context.Context) {
+ m.levelToActiveDBGuard.Lock()
+ defer m.levelToActiveDBGuard.Unlock()
+
+ for _, db := range m.levelToActiveDB {
+ db.Close(ctx)
+ }
+ m.levelToActiveDB = make(map[string]*sharedDB)
+ m.closed = true
+}
+
+func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) {
+ m.levelToActiveDBGuard.RLock()
+ defer m.levelToActiveDBGuard.RUnlock()
+
+ if m.closed {
+ return nil, errClosed
+ }
+
+ db, ok := m.levelToActiveDB[lvlPath]
+ if !ok {
+ return nil, nil
+ }
+
+ blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ if blz.IsFull() {
+ db.Close(ctx)
+ return nil, nil
+ }
+
+ return &activeDB{
+ blz: blz,
+ shDB: db,
+ }, nil
+}
+
+func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) {
+ m.levelLock.Lock(lvlPath)
+ defer m.levelLock.Unlock(lvlPath)
+
+ current, err := m.getCurrentActiveIfOk(ctx, lvlPath)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ return current, nil
+ }
+
+ nextShDB, err := m.getNextSharedDB(ctx, lvlPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if nextShDB == nil {
+ return nil, nil
+ }
+
+ blz, err := nextShDB.Open(ctx) // open db for the client; the client must call Close() after use
+ if err != nil {
+ return nil, err
+ }
+ return &activeDB{
+ blz: blz,
+ shDB: nextShDB,
+ }, nil
+}
+
+func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) {
+ var nextActiveDBIdx uint64
+ hasActive, currentIdx := m.hasActiveDB(lvlPath)
+ if hasActive {
+ nextActiveDBIdx = currentIdx + 1
+ } else {
+ hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(m.rootPath, lvlPath))
+ if err != nil {
+ return nil, err
+ }
+ if hasDBs {
+ nextActiveDBIdx = maxIdx
+ }
+ }
+
+ path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
+ next := m.dbManager.GetByPath(path)
+ _, err := next.Open(ctx) // open db to hold the active DB open; closed when the db is full, after m.replace, or by activeDBManager.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ previous, updated := m.replace(lvlPath, next)
+ if !updated && next != nil {
+ next.Close(ctx) // manager is closed, so don't hold active DB open
+ }
+ if updated && previous != nil {
+ previous.Close(ctx)
+ }
+ return next, nil
+}
+
+func (m *activeDBManager) hasActiveDB(lvlPath string) (bool, uint64) {
+ m.levelToActiveDBGuard.RLock()
+ defer m.levelToActiveDBGuard.RUnlock()
+
+ if m.closed {
+ return false, 0
+ }
+
+ db, ok := m.levelToActiveDB[lvlPath]
+ if !ok {
+ return false, 0
+ }
+ return true, u64FromHexString(filepath.Base(db.SystemPath()))
+}
+
+func (m *activeDBManager) replace(lvlPath string, shDB *sharedDB) (*sharedDB, bool) {
+ m.levelToActiveDBGuard.Lock()
+ defer m.levelToActiveDBGuard.Unlock()
+
+ if m.closed {
+ return nil, false
+ }
+
+ previous := m.levelToActiveDB[lvlPath]
+ if shDB == nil {
+ delete(m.levelToActiveDB, lvlPath)
+ } else {
+ m.levelToActiveDB[lvlPath] = shDB
+ }
+ return previous, true
+}
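Typical usage follows the comments above: obtain the active DB for a level, use it, and always release the reference. A minimal sketch (putToActive is hypothetical; errPutFailed is this package's existing sentinel):

    func putToActive(ctx context.Context, m *activeDBManager, lvlPath string, prm blobovnicza.PutPrm) error {
        db, err := m.GetOpenedActiveDBForLevel(ctx, lvlPath)
        if err != nil {
            return err
        }
        if db == nil {
            return errPutFailed // no active DB could be opened for this level
        }
        // release the reference held by activeDBManager
        defer db.Close(ctx)
        _, err = db.Blobovnicza().Put(ctx, prm)
        return err
    }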
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
index c628c96bef..3e8b9f07b6 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
@@ -1,20 +1,17 @@
package blobovniczatree
import (
+ "context"
"errors"
- "fmt"
- "path/filepath"
+ "os"
"strconv"
+ "strings"
"sync"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
- "github.com/hashicorp/golang-lru/v2/simplelru"
- "go.uber.org/zap"
)
// Blobovniczas represents the storage of the "small" objects.
@@ -60,36 +57,24 @@ import (
type Blobovniczas struct {
cfg
- // cache of opened filled Blobovniczas
- opened *simplelru.LRU[string, *blobovnicza.Blobovnicza]
- // lruMtx protects opened cache.
- // It isn't RWMutex because `Get` calls must
- // lock this mutex on write, as LRU info is updated.
- // It must be taken after activeMtx in case when eviction is possible
- // i.e. `Add`, `Purge` and `Remove` calls.
- lruMtx sync.Mutex
-
- // mutex to exclude parallel bbolt.Open() calls
- // bbolt.Open() deadlocks if it tries to open already opened file
- openMtx sync.Mutex
-
- // list of active (opened, non-filled) Blobovniczas
- activeMtx sync.RWMutex
- active map[string]blobovniczaWithIndex
-}
-
-type blobovniczaWithIndex struct {
- ind uint64
-
- blz *blobovnicza.Blobovnicza
+ commondbManager *dbManager
+ activeDBManager *activeDBManager
+ dbCache *dbCache
+ deleteProtectedObjects *addressMap
+ dbFilesGuard *sync.RWMutex
+ rebuildGuard *sync.RWMutex
}
var _ common.Storage = (*Blobovniczas)(nil)
var errPutFailed = errors.New("could not save the object in any blobovnicza")
-// NewBlobovniczaTree returns new instance of blobovnizas tree.
-func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
+const (
+ dbExtension = ".db"
+)
+
+// NewBlobovniczaTree returns a new instance of the blobovniczas tree.
+func NewBlobovniczaTree(ctx context.Context, opts ...Option) (blz *Blobovniczas) {
blz = new(Blobovniczas)
initConfig(&blz.cfg)
@@ -97,116 +82,17 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
opts[i](&blz.cfg)
}
- cache, err := simplelru.NewLRU[string, *blobovnicza.Blobovnicza](blz.openedCacheSize, func(p string, value *blobovnicza.Blobovnicza) {
- lvlPath := filepath.Dir(p)
- if b, ok := blz.active[lvlPath]; ok && b.ind == u64FromHexString(filepath.Base(p)) {
- // This branch is taken if we have recently updated active blobovnicza and remove
- // it from opened cache.
- return
- } else if err := value.Close(); err != nil {
- blz.log.Error("could not close Blobovnicza",
- zap.String("id", p),
- zap.String("error", err.Error()),
- )
- } else {
- blz.log.Debug("blobovnicza successfully closed on evict",
- zap.String("id", p),
- )
- }
- })
- if err != nil {
- // occurs only if the size is not positive
- panic(fmt.Errorf("could not create LRU cache of size %d: %w", blz.openedCacheSize, err))
- }
-
- cp := uint64(1)
- for i := uint64(0); i < blz.blzShallowDepth; i++ {
- cp *= blz.blzShallowWidth
- }
-
- blz.opened = cache
- blz.active = make(map[string]blobovniczaWithIndex, cp)
+ blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.readOnly, blz.metrics.Blobovnicza(), blz.log)
+ blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.rootPath)
+ blz.dbCache = newDBCache(ctx, blz.openedCacheSize,
+ blz.openedCacheTTL, blz.openedCacheExpInterval, blz.commondbManager)
+ blz.deleteProtectedObjects = newAddressMap()
+ blz.dbFilesGuard = &sync.RWMutex{}
+ blz.rebuildGuard = &sync.RWMutex{}
return blz
}
-// activates and returns activated blobovnicza of p-level (dir).
-//
-// returns error if blobvnicza could not be activated.
-func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error) {
- return b.updateAndGet(lvlPath, nil)
-}
-
-// updates active blobovnicza of p-level (dir).
-//
-// if current active blobovnicza's index is not old, it remains unchanged.
-func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error {
- b.log.Debug("updating active blobovnicza...", zap.String("path", lvlPath))
-
- _, err := b.updateAndGet(lvlPath, old)
-
- b.log.Debug("active blobovnicza successfully updated", zap.String("path", lvlPath))
-
- return err
-}
-
-// updates and returns active blobovnicza of p-level (dir).
-//
-// if current active blobovnicza's index is not old, it is returned unchanged.
-func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWithIndex, error) {
- b.activeMtx.RLock()
- active, ok := b.active[lvlPath]
- b.activeMtx.RUnlock()
-
- if ok {
- if old != nil {
- if active.ind == b.blzShallowWidth-1 {
- return active, logicerr.New("no more Blobovniczas")
- } else if active.ind != *old {
- // sort of CAS in order to control concurrent
- // updateActive calls
- return active, nil
- }
- } else {
- return active, nil
- }
-
- active.ind++
- }
-
- var err error
- if active.blz, err = b.openBlobovnicza(filepath.Join(lvlPath, u64ToHexString(active.ind))); err != nil {
- return active, err
- }
-
- b.activeMtx.Lock()
- defer b.activeMtx.Unlock()
-
- // check 2nd time to find out if it blobovnicza was activated while thread was locked
- tryActive, ok := b.active[lvlPath]
- if ok && tryActive.blz == active.blz {
- return tryActive, nil
- }
-
- // Remove from opened cache (active blobovnicza should always be opened).
- // Because `onEvict` callback is called in `Remove`, we need to update
- // active map beforehand.
- b.active[lvlPath] = active
-
- activePath := filepath.Join(lvlPath, u64ToHexString(active.ind))
- b.lruMtx.Lock()
- b.opened.Remove(activePath)
- if ok {
- b.opened.Add(filepath.Join(lvlPath, u64ToHexString(tryActive.ind)), tryActive.blz)
- }
- b.lruMtx.Unlock()
-
- b.log.Debug("blobovnicza successfully activated",
- zap.String("path", activePath))
-
- return active, nil
-}
-
// returns hash of the object address.
func addressHash(addr *oid.Address, path string) uint64 {
var a string
@@ -215,24 +101,49 @@ func addressHash(addr *oid.Address, path string) uint64 {
a = addr.EncodeToString()
}
- return hrw.Hash([]byte(a + path))
+ return hrw.StringHash(a + path)
}
-// converts uint64 to hex string.
func u64ToHexString(ind uint64) string {
return strconv.FormatUint(ind, 16)
}
-// converts uint64 hex string to uint64.
+func u64ToHexStringExt(ind uint64) string {
+ return strconv.FormatUint(ind, 16) + dbExtension
+}
+
func u64FromHexString(str string) uint64 {
- v, err := strconv.ParseUint(str, 16, 64)
+ v, err := strconv.ParseUint(strings.TrimSuffix(str, dbExtension), 16, 64)
if err != nil {
- panic(fmt.Sprintf("blobovnicza name is not an index %s", str))
+ panic("blobovnicza name is not an index " + str)
}
return v
}
+func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) {
+ entries, err := os.ReadDir(directory)
+ if os.IsNotExist(err) { // non initialized tree
+ return false, 0, nil
+ }
+ if err != nil {
+ return false, 0, err
+ }
+ if len(entries) == 0 {
+ return false, 0, nil
+ }
+ var hasDBs bool
+ var maxIdx uint64
+ for _, e := range entries {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
+ continue
+ }
+ hasDBs = true
+ maxIdx = max(u64FromHexString(e.Name()), maxIdx)
+ }
+ return hasDBs, maxIdx, nil
+}
+
// Type is blobovniczatree storage type used in logs and configuration.
const Type = "blobovnicza"
@@ -247,11 +158,19 @@ func (b *Blobovniczas) Path() string {
}
// SetCompressor implements common.Storage.
-func (b *Blobovniczas) SetCompressor(cc *compression.Config) {
+func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) {
b.compression = cc
}
+func (b *Blobovniczas) Compressor() *compression.Compressor {
+ return b.compression
+}
+
// SetReportErrorFunc implements common.Storage.
-func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) {
+func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) {
b.reportError = f
}
+
+func (b *Blobovniczas) SetParentID(parentID string) {
+ b.metrics.SetParentID(parentID)
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
new file mode 100644
index 0000000000..04ff5120c2
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
@@ -0,0 +1,157 @@
+package blobovniczatree
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+ cache "github.com/go-pkgz/expirable-cache/v3"
+)
+
+// dbCache caches sharedDB instances that are NOT open for Put.
+//
+// Uses dbManager for opening/closing sharedDB instances.
+// Stores a reference to a cached sharedDB, so dbManager does not close it.
+type dbCache struct {
+ cacheGuard *sync.Mutex
+ cache cache.Cache[string, *sharedDB]
+ pathLock *utilSync.KeyLocker[string] // the order of locks is important: pathLock first, cacheGuard second
+ closed bool
+ nonCached map[string]struct{}
+ wg sync.WaitGroup
+ cancel context.CancelFunc
+
+ dbManager *dbManager
+}
+
+func newDBCache(parentCtx context.Context, size int,
+ ttl time.Duration, expInterval time.Duration,
+ dbManager *dbManager,
+) *dbCache {
+ ch := cache.NewCache[string, *sharedDB]().
+ WithTTL(ttl).WithLRU().WithMaxKeys(size).
+ WithOnEvicted(func(_ string, db *sharedDB) {
+ db.Close(parentCtx)
+ })
+ ctx, cancel := context.WithCancel(parentCtx)
+ res := &dbCache{
+ cacheGuard: &sync.Mutex{},
+ wg: sync.WaitGroup{},
+ cancel: cancel,
+ cache: ch,
+ dbManager: dbManager,
+ pathLock: utilSync.NewKeyLocker[string](),
+ nonCached: make(map[string]struct{}),
+ }
+ if ttl > 0 {
+ res.wg.Add(1)
+ go func() {
+ ticker := time.NewTicker(expInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ res.wg.Done()
+ return
+ case <-ticker.C:
+ res.cacheGuard.Lock()
+ res.cache.DeleteExpired()
+ res.cacheGuard.Unlock()
+ }
+ }
+ }()
+ }
+ return res
+}
+
+func (c *dbCache) Open() {
+ c.cacheGuard.Lock()
+ defer c.cacheGuard.Unlock()
+
+ c.closed = false
+}
+
+func (c *dbCache) Close() {
+ c.cacheGuard.Lock()
+ defer c.cacheGuard.Unlock()
+ c.cancel()
+ c.wg.Wait()
+ c.cache.Purge()
+ c.closed = true
+}
+
+func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB {
+ value := c.getExisted(path)
+ if value != nil {
+ return value
+ }
+ return c.create(ctx, path)
+}
+
+func (c *dbCache) EvictAndMarkNonCached(path string) {
+ c.pathLock.Lock(path)
+ defer c.pathLock.Unlock(path)
+
+ c.cacheGuard.Lock()
+ defer c.cacheGuard.Unlock()
+
+ c.cache.Remove(path)
+ c.nonCached[path] = struct{}{}
+}
+
+func (c *dbCache) RemoveFromNonCached(path string) {
+ c.pathLock.Lock(path)
+ defer c.pathLock.Unlock(path)
+
+ c.cacheGuard.Lock()
+ defer c.cacheGuard.Unlock()
+
+ delete(c.nonCached, path)
+}
+
+func (c *dbCache) getExisted(path string) *sharedDB {
+ c.cacheGuard.Lock()
+ defer c.cacheGuard.Unlock()
+
+ if value, ok := c.cache.Get(path); ok {
+ return value
+ } else if value != nil {
+ c.cache.Invalidate(path)
+ }
+ return nil
+}
+
+func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
+ c.pathLock.Lock(path)
+ defer c.pathLock.Unlock(path)
+
+ value := c.getExisted(path)
+ if value != nil {
+ return value
+ }
+
+ value = c.dbManager.GetByPath(path)
+
+ _, err := value.Open(ctx) // open db to hold the reference; closed on eviction or when the cache is closed
+ if err != nil {
+ return value
+ }
+ if added := c.put(path, value); !added {
+ value.Close(ctx)
+ }
+ return value
+}
+
+func (c *dbCache) put(path string, db *sharedDB) bool {
+ c.cacheGuard.Lock()
+ defer c.cacheGuard.Unlock()
+
+ _, isNonCached := c.nonCached[path]
+
+ if isNonCached || c.closed {
+ return false
+ }
+ c.cache.Add(path, db)
+ return true
+}
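EvictAndMarkNonCached and RemoveFromNonCached bracket operations during which a path must not be re-admitted to the cache (for example while a database is being rebuilt). A hedged sketch of that bracketing (withUncached and the rebuildDB callback are hypothetical):

    func withUncached(c *dbCache, path string, rebuildDB func() error) error {
        c.EvictAndMarkNonCached(path)     // evict now and refuse to re-admit the path
        defer c.RemoveFromNonCached(path) // allow the path to be cached again afterwards
        return rebuildDB()
    }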
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
new file mode 100644
index 0000000000..f87f4a1449
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
@@ -0,0 +1,62 @@
+package blobovniczatree
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBlobovniczaTree_Concurrency(t *testing.T) {
+ t.Parallel()
+ const n = 1000
+
+ st := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(1024),
+ WithBlobovniczaShallowWidth(10),
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(t.TempDir()))
+ require.NoError(t, st.Open(mode.ComponentReadWrite))
+ require.NoError(t, st.Init())
+ defer func() {
+ require.NoError(t, st.Close(context.Background()))
+ }()
+
+ objGen := &testutil.SeqObjGenerator{ObjSize: 1}
+
+ var cnt atomic.Int64
+ var wg sync.WaitGroup
+ for range 1000 {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for cnt.Add(1) <= n {
+ obj := objGen.Next()
+ addr := testutil.AddressFromObject(t, obj)
+
+ raw, err := obj.Marshal()
+ require.NoError(t, err)
+
+ _, err = st.Put(context.Background(), common.PutPrm{
+ Address: addr,
+ RawData: raw,
+ })
+ require.NoError(t, err)
+
+ _, err = st.Get(context.Background(), common.GetPrm{Address: addr})
+ require.NoError(t, err)
+ }
+ }()
+ }
+
+ wg.Wait()
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index e7e890e502..a6c1ce3685 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -1,16 +1,22 @@
package blobovniczatree
import (
- "fmt"
- "path/filepath"
+ "context"
+ "strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
)
// Open opens blobovnicza tree.
-func (b *Blobovniczas) Open(readOnly bool) error {
- b.readOnly = readOnly
+func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
+ b.readOnly = mode.ReadOnly()
+ b.metrics.SetMode(mode)
+ b.metrics.SetRebuildStatus(rebuildStatusNotStarted)
+ b.openManagers()
return nil
}
@@ -18,116 +24,76 @@ func (b *Blobovniczas) Open(readOnly bool) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
- b.log.Debug("initializing Blobovnicza's")
+ b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)
if b.readOnly {
- b.log.Debug("read-only mode, skip blobovniczas initialization...")
+ b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}
- return b.iterateLeaves(func(p string) (bool, error) {
- blz, err := b.openBlobovniczaNoCache(p)
- if err != nil {
- return true, err
- }
- defer blz.Close()
+ return b.initializeDBs(context.TODO())
+}
- if err := blz.Init(); err != nil {
- return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err)
- }
+func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
+ err := util.MkdirAllX(b.rootPath, b.perm)
+ if err != nil {
+ return err
+ }
- b.log.Debug("blobovnicza successfully initialized, closing...", zap.String("id", p))
- return false, nil
+ eg, egCtx := errgroup.WithContext(ctx)
+ if b.blzInitWorkerCount > 0 {
+ eg.SetLimit(b.blzInitWorkerCount + 1)
+ }
+ eg.Go(func() error {
+ return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
+ eg.Go(func() error {
+ p = strings.TrimSuffix(p, rebuildSuffix)
+ shBlz := b.getBlobovniczaWithoutCaching(p)
+ blz, err := shBlz.Open(egCtx)
+ if err != nil {
+ return err
+ }
+ defer shBlz.Close(egCtx)
+
+ moveInfo, err := blz.ListMoveInfo(egCtx)
+ if err != nil {
+ return err
+ }
+ for _, move := range moveInfo {
+ b.deleteProtectedObjects.Add(move.Address)
+ }
+
+ b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
+ return nil
+ })
+ return false, nil
+ })
})
+ return eg.Wait()
+}
+
+func (b *Blobovniczas) openManagers() {
+ b.commondbManager.Open() // order important
+ b.activeDBManager.Open()
+ b.dbCache.Open()
}
// Close implements common.Storage.
-func (b *Blobovniczas) Close() error {
- b.activeMtx.Lock()
-
- b.lruMtx.Lock()
-
- for p, v := range b.active {
- if err := v.blz.Close(); err != nil {
- b.log.Debug("could not close active blobovnicza",
- zap.String("path", p),
- zap.String("error", err.Error()),
- )
- }
- b.opened.Remove(p)
- }
- for _, k := range b.opened.Keys() {
- blz, _ := b.opened.Get(k)
- if err := blz.Close(); err != nil {
- b.log.Debug("could not close active blobovnicza",
- zap.String("path", k),
- zap.String("error", err.Error()),
- )
- }
- b.opened.Remove(k)
- }
-
- b.active = make(map[string]blobovniczaWithIndex)
-
- b.lruMtx.Unlock()
-
- b.activeMtx.Unlock()
+func (b *Blobovniczas) Close(ctx context.Context) error {
+ b.dbCache.Close() // order important
+ b.activeDBManager.Close(ctx)
+ b.commondbManager.Close()
return nil
}
-// opens and returns blobovnicza with path p.
+// returns blobovnicza with path p.
//
-// If blobovnicza is already opened and cached, instance from cache is returned w/o changes.
-func (b *Blobovniczas) openBlobovnicza(p string) (*blobovnicza.Blobovnicza, error) {
- b.lruMtx.Lock()
- v, ok := b.opened.Get(p)
- b.lruMtx.Unlock()
- if ok {
- // blobovnicza should be opened in cache
- return v, nil
- }
-
- lvlPath := filepath.Dir(p)
- curIndex := u64FromHexString(filepath.Base(p))
-
- b.activeMtx.RLock()
- defer b.activeMtx.RUnlock()
-
- active, ok := b.active[lvlPath]
- if ok && active.ind == curIndex {
- return active.blz, nil
- }
-
- b.lruMtx.Lock()
- defer b.lruMtx.Unlock()
-
- v, ok = b.opened.Get(p)
- if ok {
- return v, nil
- }
-
- blz, err := b.openBlobovniczaNoCache(p)
- if err != nil {
- return nil, err
- }
-
- b.opened.Add(p, blz)
-
- return blz, nil
+// If the blobovnicza is already cached, the cached instance is returned unchanged.
+func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB {
+ return b.dbCache.GetOrCreate(ctx, p)
}
-func (b *Blobovniczas) openBlobovniczaNoCache(p string) (*blobovnicza.Blobovnicza, error) {
- b.openMtx.Lock()
- defer b.openMtx.Unlock()
-
- blz := blobovnicza.New(append(b.blzOpts,
- blobovnicza.WithReadOnly(b.readOnly),
- blobovnicza.WithPath(filepath.Join(b.rootPath, p)),
- )...)
-
- if err := blz.Open(); err != nil {
- return nil, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
- }
- return blz, nil
+func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {
+ return b.commondbManager.GetByPath(p)
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
new file mode 100644
index 0000000000..7db1891f96
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
@@ -0,0 +1,165 @@
+package blobovniczatree
+
+import (
+ "context"
+ "os"
+ "path"
+ "strconv"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "github.com/stretchr/testify/require"
+)
+
+func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
+ t.Parallel()
+
+ rootDir := t.TempDir()
+
+ blz := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaShallowDepth(3),
+ WithBlobovniczaShallowWidth(5),
+ WithRootPath(rootDir),
+ )
+
+ require.NoError(t, blz.Open(mode.ComponentReadWrite))
+ require.NoError(t, blz.Init())
+
+ obj35 := blobstortest.NewObject(10 * 1024)
+ addr35 := objectCore.AddressOf(obj35)
+ raw, err := obj35.Marshal()
+ require.NoError(t, err)
+
+ pRes35, err := blz.Put(context.Background(), common.PutPrm{
+ Address: addr35,
+ Object: obj35,
+ RawData: raw,
+ })
+ require.NoError(t, err)
+
+ gRes, err := blz.Get(context.Background(), common.GetPrm{
+ Address: addr35,
+ StorageID: pRes35.StorageID,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj35, gRes.Object)
+
+ gRes, err = blz.Get(context.Background(), common.GetPrm{
+ Address: addr35,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj35, gRes.Object)
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ // change depth and width
+ blz = NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaShallowDepth(5),
+ WithBlobovniczaShallowWidth(2),
+ WithRootPath(rootDir),
+ )
+
+ require.NoError(t, blz.Open(mode.ComponentReadWrite))
+ require.NoError(t, blz.Init())
+
+ gRes, err = blz.Get(context.Background(), common.GetPrm{
+ Address: addr35,
+ StorageID: pRes35.StorageID,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj35, gRes.Object)
+
+ gRes, err = blz.Get(context.Background(), common.GetPrm{
+ Address: addr35,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj35, gRes.Object)
+
+ obj52 := blobstortest.NewObject(10 * 1024)
+ addr52 := objectCore.AddressOf(obj52)
+ raw, err = obj52.Marshal()
+ require.NoError(t, err)
+
+ pRes52, err := blz.Put(context.Background(), common.PutPrm{
+ Address: addr52,
+ Object: obj52,
+ RawData: raw,
+ })
+ require.NoError(t, err)
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ // change depth and width back
+ blz = NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaShallowDepth(3),
+ WithBlobovniczaShallowWidth(5),
+ WithRootPath(rootDir),
+ )
+ require.NoError(t, blz.Open(mode.ComponentReadWrite))
+ require.NoError(t, blz.Init())
+
+ gRes, err = blz.Get(context.Background(), common.GetPrm{
+ Address: addr35,
+ StorageID: pRes35.StorageID,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj35, gRes.Object)
+
+ gRes, err = blz.Get(context.Background(), common.GetPrm{
+ Address: addr35,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj35, gRes.Object)
+
+ gRes, err = blz.Get(context.Background(), common.GetPrm{
+ Address: addr52,
+ StorageID: pRes52.StorageID,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj52, gRes.Object)
+
+ gRes, err = blz.Get(context.Background(), common.GetPrm{
+ Address: addr52,
+ })
+ require.NoError(t, err)
+ require.EqualValues(t, obj52, gRes.Object)
+
+ require.NoError(t, blz.Close(context.Background()))
+}
+
+func TestInitBlobovniczasInitErrorType(t *testing.T) {
+ t.Parallel()
+
+ rootDir := t.TempDir()
+
+ for idx := 0; idx < 10; idx++ {
+ f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"))
+ require.NoError(t, err)
+ _, err = f.Write([]byte("invalid db"))
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+
+ f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix))
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+ }
+
+ blz := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaShallowDepth(1),
+ WithBlobovniczaShallowWidth(1),
+ WithRootPath(rootDir),
+ )
+
+ require.NoError(t, blz.Open(mode.ComponentReadWrite))
+ err := blz.Init()
+ require.Contains(t, err.Error(), "open blobovnicza")
+ require.Contains(t, err.Error(), "invalid database")
+ require.NoError(t, blz.Close(context.Background()))
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go
new file mode 100644
index 0000000000..b83849c775
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/count.go
@@ -0,0 +1,38 @@
+package blobovniczatree
+
+import (
+ "context"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+)
+
+func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) {
+ var (
+ success bool
+ startedAt = time.Now()
+ )
+ defer func() {
+ b.metrics.ObjectsCount(time.Since(startedAt), success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
+ defer span.End()
+
+ var result uint64
+ err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
+ shDB := b.getBlobovniczaWithoutCaching(p)
+ blz, err := shDB.Open(ctx)
+ if err != nil {
+ return true, err
+ }
+ defer shDB.Close(ctx)
+
+ result += blz.ObjectsCount()
+ return false, nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ return result, nil
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
index 7e14d6d8d4..d096791c3a 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
@@ -1,58 +1,94 @@
package blobovniczatree
import (
- "path/filepath"
+ "context"
+ "encoding/hex"
+ "errors"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
+var (
+ errObjectIsDeleteProtected = errors.New("object is delete protected")
+ deleteRes = common.DeleteRes{}
+)
+
// Delete deletes object from blobovnicza tree.
//
// If blobocvnicza ID is specified, only this blobovnicza is processed.
// Otherwise, all Blobovniczas are processed descending weight.
-func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err error) {
+func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res common.DeleteRes, err error) {
+ var (
+ success = false
+ startedAt = time.Now()
+ )
+ defer func() {
+ b.metrics.Delete(time.Since(startedAt), success, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Delete",
+ trace.WithAttributes(
+ attribute.String("path", b.rootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
if b.readOnly {
- return common.DeleteRes{}, common.ErrReadOnly
+ return deleteRes, common.ErrReadOnly
+ }
+
+ if b.rebuildGuard.TryRLock() {
+ defer b.rebuildGuard.RUnlock()
+ } else {
+ return deleteRes, errRebuildInProgress
+ }
+
+ if b.deleteProtectedObjects.Contains(prm.Address) {
+ return deleteRes, errObjectIsDeleteProtected
}
var bPrm blobovnicza.DeletePrm
bPrm.SetAddress(prm.Address)
if prm.StorageID != nil {
- id := blobovnicza.NewIDFromBytes(prm.StorageID)
- blz, err := b.openBlobovnicza(id.String())
+ id := NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
+ defer shBlz.Close(ctx)
- return b.deleteObject(blz, bPrm, prm)
+ if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
+ success = true
+ }
+ return res, err
}
- activeCache := make(map[string]struct{})
objectFound := false
- err = b.iterateSortedLeaves(&prm.Address, func(p string) (bool, error) {
- dirPath := filepath.Dir(p)
-
- // don't process active blobovnicza of the level twice
- _, ok := activeCache[dirPath]
-
- res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm)
+ err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
if err != nil {
- if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not remove object from level",
+ if !client.IsErrObjectNotFound(err) {
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
}
- activeCache[dirPath] = struct{}{}
-
if err == nil {
objectFound = true
}
@@ -63,8 +99,9 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
if err == nil && !objectFound {
// not found in any blobovnicza
- return common.DeleteRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
+ success = err == nil
return
}
@@ -72,63 +109,19 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
// tries to delete object from particular blobovnicza.
//
// returns no error if object was removed from some blobovnicza of the same level.
-func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) {
- lvlPath := filepath.Dir(blzPath)
-
- // try to remove from blobovnicza if it is opened
- b.lruMtx.Lock()
- v, ok := b.opened.Get(blzPath)
- b.lruMtx.Unlock()
- if ok {
- if res, err := b.deleteObject(v, prm, dp); err == nil {
- return res, err
- } else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not remove object from opened blobovnicza",
- zap.String("path", blzPath),
- zap.String("error", err.Error()),
- )
- }
- }
-
- // therefore the object is possibly placed in a lighter blobovnicza
-
- // next we check in the active level blobobnicza:
- // * the active blobovnicza is always opened.
- b.activeMtx.RLock()
- active, ok := b.active[lvlPath]
- b.activeMtx.RUnlock()
-
- if ok && tryActive {
- if res, err := b.deleteObject(active.blz, prm, dp); err == nil {
- return res, err
- } else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not remove object from active blobovnicza",
- zap.String("path", blzPath),
- zap.String("error", err.Error()),
- )
- }
- }
-
- // then object is possibly placed in closed blobovnicza
-
- // check if it makes sense to try to open the blob
- // (Blobovniczas "after" the active one are empty anyway,
- // and it's pointless to open them).
- if u64FromHexString(filepath.Base(blzPath)) > active.ind {
- return common.DeleteRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
- }
-
- // open blobovnicza (cached inside)
- blz, err := b.openBlobovnicza(blzPath)
+func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
+ shBlz := b.getBlobovnicza(ctx, blzPath)
+ blz, err := shBlz.Open(ctx)
if err != nil {
- return common.DeleteRes{}, err
+ return deleteRes, err
}
+ defer shBlz.Close(ctx)
- return b.deleteObject(blz, prm, dp)
+ return b.deleteObject(ctx, blz, prm)
}
// removes object from blobovnicza and returns common.DeleteRes.
-func (b *Blobovniczas) deleteObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) {
- _, err := blz.Delete(prm)
- return common.DeleteRes{}, err
+func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) {
+ _, err := blz.Delete(ctx, prm)
+ return deleteRes, err
}
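
The delete path above coordinates with rebuild through `rebuildGuard`: deletes take a shared lock with `TryRLock` and fail fast with `errRebuildInProgress` rather than queueing behind an exclusive rebuild. A minimal sketch of that pattern using only the standard library (`sync.RWMutex.TryRLock` requires Go 1.18+):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed")

type store struct {
	rebuildGuard sync.RWMutex
}

// delete fails fast instead of blocking behind an exclusive rebuild.
func (s *store) delete() error {
	if !s.rebuildGuard.TryRLock() {
		return errRebuildInProgress
	}
	defer s.rebuildGuard.RUnlock()
	// ... perform the delete under the shared lock ...
	return nil
}

// rebuild takes the guard exclusively, so concurrent deletes return an error.
func (s *store) rebuild() {
	s.rebuildGuard.Lock()
	defer s.rebuildGuard.Unlock()
	time.Sleep(10 * time.Millisecond) // simulate work
}

func main() {
	var s store
	go s.rebuild()
	time.Sleep(time.Millisecond)
	fmt.Println(s.delete()) // likely: rebuild is in progress, ...
}
```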
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/errors.go b/pkg/local_object_storage/blobstor/blobovniczatree/errors.go
index 4ef053d546..be0fd81c30 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/errors.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/errors.go
@@ -7,8 +7,11 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
+var errClosed = logicerr.New("blobovnicza is closed")
+
func isErrOutOfRange(err error) bool {
- return errors.As(err, new(apistatus.ObjectOutOfRange))
+ var target *apistatus.ObjectOutOfRange
+ return errors.As(err, &target)
}
func isLogical(err error) bool {
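
The `isErrOutOfRange` change reflects that the SDK status errors are now matched as pointer types, so `errors.As` needs a typed pointer variable as its target. An illustrative sketch with a stand-in error type (`objectOutOfRange` is hypothetical, not the SDK type):

```go
package main

import (
	"errors"
	"fmt"
)

// objectOutOfRange stands in for a status error returned as a pointer,
// mirroring apistatus.ObjectOutOfRange in the diff above.
type objectOutOfRange struct{}

func (*objectOutOfRange) Error() string { return "object out of range" }

func isErrOutOfRange(err error) bool {
	// The target must be a pointer to the pointer type the error is
	// returned as; errors.As fills it in when a match is found.
	var target *objectOutOfRange
	return errors.As(err, &target)
}

func main() {
	err := fmt.Errorf("wrapped: %w", new(objectOutOfRange))
	fmt.Println(isErrOutOfRange(err))                 // true
	fmt.Println(isErrOutOfRange(errors.New("other"))) // false
}
```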
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index e13e49351f..0c5e48821b 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -1,50 +1,69 @@
package blobovniczatree
import (
- "path/filepath"
+ "context"
+ "encoding/hex"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// Exists implements common.Storage.
-func (b *Blobovniczas) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
+func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ found = false
+ )
+ defer func() {
+ b.metrics.Exists(time.Since(startedAt), success, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Exists",
+ trace.WithAttributes(
+ attribute.String("path", b.rootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
if prm.StorageID != nil {
- id := blobovnicza.NewIDFromBytes(prm.StorageID)
- blz, err := b.openBlobovnicza(id.String())
+ id := NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.ExistsRes{}, err
}
+ defer shBlz.Close(ctx)
- exists, err := blz.Exists(prm.Address)
+ exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err
}
- activeCache := make(map[string]struct{})
-
var gPrm blobovnicza.GetPrm
gPrm.SetAddress(prm.Address)
- var found bool
- err := b.iterateSortedLeaves(&prm.Address, func(p string) (bool, error) {
- dirPath := filepath.Dir(p)
-
- _, ok := activeCache[dirPath]
-
- _, err := b.getObjectFromLevel(gPrm, p, !ok)
+ err := b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ _, err := b.getObjectFromLevel(ctx, gPrm, p)
if err != nil {
- if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from level",
+ if !client.IsErrObjectNotFound(err) {
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
}
- activeCache[dirPath] = struct{}{}
found = err == nil
return found, nil
})
+ success = err == nil
return common.ExistsRes{Exists: found}, err
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index 8b6db9fa7c..df2b4ffe53 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -1,6 +1,8 @@
package blobovniczatree
import (
+ "bytes"
+ "context"
"os"
"path/filepath"
"testing"
@@ -8,57 +10,57 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
)
func TestExistsInvalidStorageID(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
WithRootPath(dir),
WithBlobovniczaSize(1<<20))
- require.NoError(t, b.Open(false))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
- t.Cleanup(func() { _ = b.Close() })
+ defer func() { require.NoError(t, b.Close(context.Background())) }()
obj := blobstortest.NewObject(1024)
addr := object.AddressOf(obj)
d, err := obj.Marshal()
require.NoError(t, err)
- putRes, err := b.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
+ putRes, err := b.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
t.Run("valid but wrong storage id", func(t *testing.T) {
// "0/X/Y" <-> "1/X/Y"
- storageID := slice.Copy(putRes.StorageID)
+ storageID := bytes.Clone(putRes.StorageID)
if storageID[0] == '0' {
storageID[0]++
} else {
storageID[0]--
}
- res, err := b.Exists(common.ExistsPrm{Address: addr, StorageID: storageID})
+ res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: storageID})
require.NoError(t, err)
require.False(t, res.Exists)
})
- t.Run("invalid storage id", func(t *testing.T) {
- // "0/X/Y" <-> "1/X/Y"
- storageID := slice.Copy(putRes.StorageID)
- storageID[0] = '9'
- badDir := filepath.Join(dir, "9")
- require.NoError(t, os.MkdirAll(badDir, os.ModePerm))
- require.NoError(t, os.Chmod(badDir, 0))
- t.Cleanup(func() { _ = os.Chmod(filepath.Join(dir, "9"), os.ModePerm) })
+ t.Run("valid id but corrupted file", func(t *testing.T) {
+ relBadFileDir := filepath.Join("9", "0")
+ badFileName := "0"
- res, err := b.Exists(common.ExistsPrm{Address: addr, StorageID: storageID})
+ // An invalid boltdb file is created so that it returns an error when opened
+ require.NoError(t, os.MkdirAll(filepath.Join(dir, relBadFileDir), os.ModePerm))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, relBadFileDir, badFileName+".db"), []byte("not a boltdb file content"), 0o777))
+
+ res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: []byte(filepath.Join(relBadFileDir, badFileName))})
require.Error(t, err)
require.False(t, res.Exists)
})
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
index f1bc0b225b..9244d765cc 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
@@ -1,25 +1,22 @@
package blobovniczatree
import (
- "os"
- "path/filepath"
- "strconv"
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap/zaptest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
)
func TestGeneric(t *testing.T) {
const maxObjectSize = 1 << 16
- defer func() { _ = os.RemoveAll(t.Name()) }()
-
helper := func(t *testing.T, dir string) common.Storage {
return NewBlobovniczaTree(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -27,16 +24,14 @@ func TestGeneric(t *testing.T) {
WithBlobovniczaSize(1<<20))
}
- var n int
newTree := func(t *testing.T) common.Storage {
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
- return helper(t, dir)
+ return helper(t, t.TempDir())
}
blobstortest.TestAll(t, newTree, 1024, maxObjectSize)
t.Run("info", func(t *testing.T) {
- dir := filepath.Join(t.Name(), "info")
+ dir := t.TempDir()
blobstortest.TestInfo(t, func(t *testing.T) common.Storage {
return helper(t, dir)
}, Type, dir)
@@ -46,17 +41,15 @@ func TestGeneric(t *testing.T) {
func TestControl(t *testing.T) {
const maxObjectSize = 2048
- defer func() { _ = os.RemoveAll(t.Name()) }()
-
- var n int
newTree := func(t *testing.T) common.Storage {
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
return NewBlobovniczaTree(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
- WithRootPath(dir),
+ WithRootPath(t.TempDir()),
WithBlobovniczaSize(1<<20))
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index 89ea9b6418..e5c83e5f2c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -1,14 +1,21 @@
package blobovniczatree
import (
+ "context"
+ "encoding/hex"
"fmt"
- "path/filepath"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -16,113 +23,89 @@ import (
//
// If blobovnicza ID is specified, only this blobovnicza is processed.
// Otherwise, all Blobovniczas are processed in descending weight order.
-func (b *Blobovniczas) Get(prm common.GetPrm) (res common.GetRes, err error) {
+func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.GetRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ size = 0
+ )
+ defer func() {
+ b.metrics.Get(time.Since(startedAt), size, success, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Get",
+ trace.WithAttributes(
+ attribute.String("path", b.rootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ attribute.Bool("raw", prm.Raw),
+ ))
+ defer span.End()
+
var bPrm blobovnicza.GetPrm
bPrm.SetAddress(prm.Address)
if prm.StorageID != nil {
- id := blobovnicza.NewIDFromBytes(prm.StorageID)
- blz, err := b.openBlobovnicza(id.String())
+ id := NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
+ defer shBlz.Close(ctx)
- return b.getObject(blz, bPrm)
+ res, err = b.getObject(ctx, blz, bPrm)
+ if err == nil {
+ success = true
+ size = len(res.RawData)
+ }
+ return res, err
}
- activeCache := make(map[string]struct{})
-
- err = b.iterateSortedLeaves(&prm.Address, func(p string) (bool, error) {
- dirPath := filepath.Dir(p)
-
- _, ok := activeCache[dirPath]
-
- res, err = b.getObjectFromLevel(bPrm, p, !ok)
+ err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ res, err = b.getObjectFromLevel(ctx, bPrm, p)
if err != nil {
- if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from level",
+ if !client.IsErrObjectNotFound(err) {
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
}
- activeCache[dirPath] = struct{}{}
-
// abort iterator if found, otherwise process all Blobovniczas
return err == nil, nil
})
if err == nil && res.Object == nil {
// not found in any blobovnicza
- return res, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return res, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
+ success = true
+ size = len(res.RawData)
+
return
}
// tries to read object from particular blobovnicza.
//
// returns error if object could not be read from any blobovnicza of the same level.
-func (b *Blobovniczas) getObjectFromLevel(prm blobovnicza.GetPrm, blzPath string, tryActive bool) (common.GetRes, error) {
- lvlPath := filepath.Dir(blzPath)
-
- // try to read from blobovnicza if it is opened
- b.lruMtx.Lock()
- v, ok := b.opened.Get(blzPath)
- b.lruMtx.Unlock()
- if ok {
- if res, err := b.getObject(v, prm); err == nil {
- return res, err
- } else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not read object from opened blobovnicza",
- zap.String("path", blzPath),
- zap.String("error", err.Error()),
- )
- }
- }
-
- // therefore the object is possibly placed in a lighter blobovnicza
-
- // next we check in the active level blobobnicza:
- // * the freshest objects are probably the most demanded;
- // * the active blobovnicza is always opened.
- b.activeMtx.RLock()
- active, ok := b.active[lvlPath]
- b.activeMtx.RUnlock()
-
- if ok && tryActive {
- if res, err := b.getObject(active.blz, prm); err == nil {
- return res, err
- } else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from active blobovnicza",
- zap.String("path", blzPath),
- zap.String("error", err.Error()),
- )
- }
- }
-
- // then object is possibly placed in closed blobovnicza
-
- // check if it makes sense to try to open the blob
- // (Blobovniczas "after" the active one are empty anyway,
- // and it's pointless to open them).
- if u64FromHexString(filepath.Base(blzPath)) > active.ind {
- return common.GetRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
- }
-
+func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
// open blobovnicza (cached inside)
- blz, err := b.openBlobovnicza(blzPath)
+ shBlz := b.getBlobovnicza(ctx, blzPath)
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRes{}, err
}
+ defer shBlz.Close(ctx)
- return b.getObject(blz, prm)
+ return b.getObject(ctx, blz, prm)
}
// reads object from blobovnicza and returns GetSmallRes.
-func (b *Blobovniczas) getObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.GetPrm) (common.GetRes, error) {
- res, err := blz.Get(prm)
+func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.GetPrm) (common.GetRes, error) {
+ res, err := blz.Get(ctx, prm)
if err != nil {
return common.GetRes{}, err
}
@@ -130,13 +113,13 @@ func (b *Blobovniczas) getObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.G
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
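
The lookup strategy in `Get` is: a known storage ID is resolved directly, otherwise candidate databases are probed in sorted order until one returns the object. A toy sketch of that fallback, with plain maps standing in for opened blobovniczas (all names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// dbs maps a storage ID (relative path) to stored payloads; it stands in
// for the set of blobovnicza databases on disk.
var dbs = map[string]map[string][]byte{
	"0/0/0": {"addr-1": []byte("payload")},
	"1/0/0": {},
}

// get resolves a known storage ID directly; without one, it probes every
// candidate database until the object is found.
func get(addr, storageID string) ([]byte, error) {
	if storageID != "" {
		data, ok := dbs[storageID][addr]
		if !ok {
			return nil, errNotFound
		}
		return data, nil
	}
	for id := range dbs { // probing order is illustrative only
		if data, ok := dbs[id][addr]; ok {
			return data, nil
		}
	}
	return nil, errNotFound
}

func main() {
	data, err := get("addr-1", "")
	fmt.Println(string(data), err)
}
```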
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index 29df23944f..27d13f4f30 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -1,14 +1,22 @@
package blobovniczatree
import (
+ "context"
+ "encoding/hex"
"fmt"
- "path/filepath"
+ "strconv"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -16,41 +24,59 @@ import (
//
// If blobovnicza ID is specified, only this blobovnicza is processed.
// Otherwise, all Blobovniczas are processed in descending weight order.
-func (b *Blobovniczas) GetRange(prm common.GetRangePrm) (res common.GetRangeRes, err error) {
+func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (res common.GetRangeRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ size = 0
+ )
+ defer func() {
+ b.metrics.GetRange(time.Since(startedAt), size, success, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.GetRange",
+ trace.WithAttributes(
+ attribute.String("path", b.rootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)),
+ attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)),
+ ))
+ defer span.End()
+
if prm.StorageID != nil {
- id := blobovnicza.NewIDFromBytes(prm.StorageID)
- blz, err := b.openBlobovnicza(id.String())
+ id := NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(ctx, id.Path())
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
+ defer shBlz.Close(ctx)
- return b.getObjectRange(blz, prm)
+ res, err := b.getObjectRange(ctx, blz, prm)
+ if err == nil {
+ size = len(res.Data)
+ success = true
+ }
+ return res, err
}
- activeCache := make(map[string]struct{})
objectFound := false
- err = b.iterateSortedLeaves(&prm.Address, func(p string) (bool, error) {
- dirPath := filepath.Dir(p)
-
- _, ok := activeCache[dirPath]
-
- res, err = b.getRangeFromLevel(prm, p, !ok)
+ err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ res, err = b.getRangeFromLevel(ctx, prm, p)
if err != nil {
outOfBounds := isErrOutOfRange(err)
- if !outOfBounds && !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from level",
+ if !outOfBounds && !client.IsErrObjectNotFound(err) {
+ b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.String("error", err.Error()),
- )
+ zap.Error(err))
}
if outOfBounds {
return true, err
}
}
- activeCache[dirPath] = struct{}{}
-
objectFound = err == nil
// abort iterator if found, otherwise process all Blobovniczas
@@ -59,7 +85,12 @@ func (b *Blobovniczas) GetRange(prm common.GetRangePrm) (res common.GetRangeRes,
if err == nil && !objectFound {
// not found in any blobovnicza
- return common.GetRangeRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
+ if err == nil {
+ success = true
+ size = len(res.Data)
}
return
@@ -68,74 +99,20 @@ func (b *Blobovniczas) GetRange(prm common.GetRangePrm) (res common.GetRangeRes,
// tries to read range of object payload data from particular blobovnicza.
//
// returns error if object could not be read from any blobovnicza of the same level.
-func (b *Blobovniczas) getRangeFromLevel(prm common.GetRangePrm, blzPath string, tryActive bool) (common.GetRangeRes, error) {
- lvlPath := filepath.Dir(blzPath)
-
- // try to read from blobovnicza if it is opened
- b.lruMtx.Lock()
- v, ok := b.opened.Get(blzPath)
- b.lruMtx.Unlock()
- if ok {
- res, err := b.getObjectRange(v, prm)
- switch {
- case err == nil,
- isErrOutOfRange(err):
- return res, err
- default:
- if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not read payload range from opened blobovnicza",
- zap.String("path", blzPath),
- zap.String("error", err.Error()),
- )
- }
- }
- }
-
- // therefore the object is possibly placed in a lighter blobovnicza
-
- // next we check in the active level blobobnicza:
- // * the freshest objects are probably the most demanded;
- // * the active blobovnicza is always opened.
- b.activeMtx.RLock()
- active, ok := b.active[lvlPath]
- b.activeMtx.RUnlock()
-
- if ok && tryActive {
- res, err := b.getObjectRange(active.blz, prm)
- switch {
- case err == nil,
- isErrOutOfRange(err):
- return res, err
- default:
- if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not read payload range from active blobovnicza",
- zap.String("path", blzPath),
- zap.String("error", err.Error()),
- )
- }
- }
- }
-
- // then object is possibly placed in closed blobovnicza
-
- // check if it makes sense to try to open the blob
- // (Blobovniczas "after" the active one are empty anyway,
- // and it's pointless to open them).
- if u64FromHexString(filepath.Base(blzPath)) > active.ind {
- return common.GetRangeRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
- }
-
+func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
// open blobovnicza (cached inside)
- blz, err := b.openBlobovnicza(blzPath)
+ shBlz := b.getBlobovnicza(ctx, blzPath)
+ blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
+ defer shBlz.Close(ctx)
- return b.getObjectRange(blz, prm)
+ return b.getObjectRange(ctx, blz, prm)
}
// reads range of object payload data from blobovnicza and returns GetRangeSmallRes.
-func (b *Blobovniczas) getObjectRange(blz *blobovnicza.Blobovnicza, prm common.GetRangePrm) (common.GetRangeRes, error) {
+func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blobovnicza, prm common.GetRangePrm) (common.GetRangeRes, error) {
var gPrm blobovnicza.GetPrm
gPrm.SetAddress(prm.Address)
@@ -143,7 +120,7 @@ func (b *Blobovniczas) getObjectRange(blz *blobovnicza.Blobovnicza, prm common.G
// stores data that is compressed on BlobStor side.
// If blobovnicza learns to do the compression itself,
// we can start using GetRange.
- res, err := blz.Get(gPrm)
+ res, err := blz.Get(ctx, gPrm)
if err != nil {
return common.GetRangeRes{}, err
}
@@ -151,13 +128,13 @@ func (b *Blobovniczas) getObjectRange(blz *blobovnicza.Blobovnicza, prm common.G
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
from := prm.Range.GetOffset()
@@ -165,7 +142,7 @@ func (b *Blobovniczas) getObjectRange(blz *blobovnicza.Blobovnicza, prm common.G
payload := obj.Payload()
if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
- return common.GetRangeRes{}, logicerr.Wrap(apistatus.ObjectOutOfRange{})
+ return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange))
}
return common.GetRangeRes{
diff --git a/pkg/local_object_storage/blobovnicza/id.go b/pkg/local_object_storage/blobstor/blobovniczatree/id.go
similarity index 71%
rename from pkg/local_object_storage/blobovnicza/id.go
rename to pkg/local_object_storage/blobstor/blobovniczatree/id.go
index 3d3ccf8b99..a080819bcf 100644
--- a/pkg/local_object_storage/blobovnicza/id.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/id.go
@@ -1,4 +1,4 @@
-package blobovnicza
+package blobovniczatree
// ID represents Blobovnicza identifier.
type ID []byte
@@ -8,8 +8,8 @@ func NewIDFromBytes(v []byte) *ID {
return (*ID)(&v)
}
-func (id ID) String() string {
- return string(id)
+func (id ID) Path() string {
+ return string(id) + dbExtension
}
func (id ID) Bytes() []byte {
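
With `ID.String()` replaced by `ID.Path()`, a storage ID now maps directly to the relative file name of the database. A small sketch of that mapping, assuming `dbExtension` is `".db"` as suggested by the corrupted-file test above:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// dbExtension is assumed to be ".db" here, matching the test that creates
// "<storage id>.db" files on disk.
const dbExtension = ".db"

type ID []byte

func NewIDFromBytes(v []byte) *ID { return (*ID)(&v) }

// Path converts a storage ID such as "1/0/0" into the relative file name
// of the blobovnicza database, e.g. "1/0/0.db".
func (id ID) Path() string { return string(id) + dbExtension }

func main() {
	id := NewIDFromBytes([]byte(filepath.Join("1", "0", "0")))
	fmt.Println(id.Path()) // 1/0/0.db
}
```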
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index 9918801b92..ceb8fb7e3b 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -1,59 +1,90 @@
package blobovniczatree
import (
+ "context"
"fmt"
+ "os"
"path/filepath"
+ "strings"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
// Iterate iterates over all objects in b.
-func (b *Blobovniczas) Iterate(prm common.IteratePrm) (common.IterateRes, error) {
- return common.IterateRes{}, b.iterateBlobovniczas(prm.IgnoreErrors, func(p string, blz *blobovnicza.Blobovnicza) error {
+func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) {
+ var (
+ startedAt = time.Now()
+ err error
+ )
+ defer func() {
+ b.metrics.Iterate(time.Since(startedAt), err == nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Iterate",
+ trace.WithAttributes(
+ attribute.String("path", b.rootPath),
+ attribute.Bool("ignore_errors", prm.IgnoreErrors),
+ ))
+ defer span.End()
+
+ err = b.iterateBlobovniczas(ctx, prm.IgnoreErrors, func(p string, blz *blobovnicza.Blobovnicza) error {
var subPrm blobovnicza.IteratePrm
subPrm.SetHandler(func(elem blobovnicza.IterationElement) error {
data, err := b.compression.Decompress(elem.ObjectData())
if err != nil {
if prm.IgnoreErrors {
- if prm.ErrorHandler != nil {
- return prm.ErrorHandler(elem.Address(), err)
- }
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.Stringer("address", elem.Address()),
+ zap.Error(err),
+ zap.String("storage_id", p),
+ zap.String("root_path", b.rootPath))
return nil
}
- return fmt.Errorf("could not decompress object data: %w", err)
+ return fmt.Errorf("decompress object data: %w", err)
}
if prm.Handler != nil {
return prm.Handler(common.IterationElement{
Address: elem.Address(),
ObjectData: data,
- StorageID: []byte(p),
+ StorageID: []byte(strings.TrimSuffix(p, dbExtension)),
})
}
- return prm.LazyHandler(elem.Address(), func() ([]byte, error) {
- return data, err
- })
+ return nil
})
subPrm.DecodeAddresses()
- _, err := blz.Iterate(subPrm)
+ _, err := blz.Iterate(ctx, subPrm)
return err
})
+ return common.IterateRes{}, err
}
// iterator over all Blobovniczas in unsorted order. Break on f's error return.
-func (b *Blobovniczas) iterateBlobovniczas(ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
- return b.iterateLeaves(func(p string) (bool, error) {
- blz, err := b.openBlobovnicza(p)
+func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
+ return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
+ shBlz := b.getBlobovnicza(ctx, p)
+ blz, err := shBlz.Open(ctx)
if err != nil {
if ignoreErrors {
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.Error(err),
+ zap.String("storage_id", p),
+ zap.String("root_path", b.rootPath))
return false, nil
}
- return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
+ return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
}
+ defer shBlz.Close(ctx)
err = f(p, blz)
@@ -61,9 +92,12 @@ func (b *Blobovniczas) iterateBlobovniczas(ignoreErrors bool, f func(string, *bl
})
}
-// iterator over the paths of Blobovniczas sorted by weight.
-func (b *Blobovniczas) iterateSortedLeaves(addr *oid.Address, f func(string) (bool, error)) error {
+// iterateSortedLeaves iterates over the paths of Blobovniczas sorted by weight.
+//
+// Uses depth, width and leaf width for iteration.
+func (b *Blobovniczas) iterateSortedLeaves(ctx context.Context, addr *oid.Address, f func(string) (bool, error)) error {
_, err := b.iterateSorted(
+ ctx,
addr,
make([]string, 0, b.blzShallowDepth),
b.blzShallowDepth,
@@ -74,13 +108,14 @@ func (b *Blobovniczas) iterateSortedLeaves(addr *oid.Address, f func(string) (bo
}
// iterator over directories with Blobovniczas sorted by weight.
-func (b *Blobovniczas) iterateDeepest(addr oid.Address, f func(string) (bool, error)) error {
+func (b *Blobovniczas) iterateDeepest(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error {
depth := b.blzShallowDepth
if depth > 0 {
depth--
}
_, err := b.iterateSorted(
+ ctx,
&addr,
make([]string, 0, depth),
depth,
@@ -91,18 +126,43 @@ func (b *Blobovniczas) iterateDeepest(addr oid.Address, f func(string) (bool, er
}
// iterator over particular level of directories.
-func (b *Blobovniczas) iterateSorted(addr *oid.Address, curPath []string, execDepth uint64, f func([]string) (bool, error)) (bool, error) {
- indices := indexSlice(b.blzShallowWidth)
+func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, curPath []string, execDepth uint64, f func([]string) (bool, error)) (bool, error) {
+ isLeafLevel := uint64(len(curPath)) == b.blzShallowDepth
+ levelWidth := b.blzShallowWidth
+ if isLeafLevel {
+ hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(append([]string{b.rootPath}, curPath...)...))
+ if err != nil {
+ return false, err
+ }
+ levelWidth = 0
+ if hasDBs {
+ levelWidth = maxIdx + 1
+ }
+ }
+ indices := indexSlice(levelWidth)
- hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))
+ if !isLeafLevel {
+ hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))
+ }
exec := uint64(len(curPath)) == execDepth
for i := range indices {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ default:
+ }
+
+ lastPart := u64ToHexString(indices[i])
+ if isLeafLevel {
+ lastPart = u64ToHexStringExt(indices[i])
+ }
+
if i == 0 {
- curPath = append(curPath, u64ToHexString(indices[i]))
+ curPath = append(curPath, lastPart)
} else {
- curPath[len(curPath)-1] = u64ToHexString(indices[i])
+ curPath[len(curPath)-1] = lastPart
}
if exec {
@@ -111,7 +171,7 @@ func (b *Blobovniczas) iterateSorted(addr *oid.Address, curPath []string, execDe
} else if stop {
return true, nil
}
- } else if stop, err := b.iterateSorted(addr, curPath, execDepth, f); err != nil {
+ } else if stop, err := b.iterateSorted(ctx, addr, curPath, execDepth, f); err != nil {
return false, err
} else if stop {
return true, nil
@@ -121,9 +181,131 @@ func (b *Blobovniczas) iterateSorted(addr *oid.Address, curPath []string, execDe
return false, nil
}
-// iterator over the paths of Blobovniczas in random order.
-func (b *Blobovniczas) iterateLeaves(f func(string) (bool, error)) error {
- return b.iterateSortedLeaves(nil, f)
+// iterateExistingDBPaths iterates over the paths of Blobovniczas in no particular order.
+//
+// Uses existing blobovnicza files for iteration.
+func (b *Blobovniczas) iterateExistingDBPaths(ctx context.Context, f func(string) (bool, error)) error {
+ b.dbFilesGuard.RLock()
+ defer b.dbFilesGuard.RUnlock()
+
+ _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return !strings.HasSuffix(path, rebuildSuffix) })
+ return err
+}
+
+func (b *Blobovniczas) iterateExistingPathsDFS(ctx context.Context, path string, f func(string) (bool, error), fileFilter func(path string) bool) (bool, error) {
+ sysPath := filepath.Join(b.rootPath, path)
+ entries, err := os.ReadDir(sysPath)
+ if os.IsNotExist(err) && b.readOnly && path == "" { // non-initialized tree in read-only mode
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ for _, entry := range entries {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ default:
+ }
+ if entry.IsDir() {
+ stop, err := b.iterateExistingPathsDFS(ctx, filepath.Join(path, entry.Name()), f, fileFilter)
+ if err != nil {
+ return false, err
+ }
+ if stop {
+ return true, nil
+ }
+ } else {
+ if !fileFilter(entry.Name()) {
+ continue
+ }
+ stop, err := f(filepath.Join(path, entry.Name()))
+ if err != nil {
+ return false, err
+ }
+ if stop {
+ return true, nil
+ }
+ }
+ }
+ return false, nil
+}
+
+// iterateIncompletedRebuildDBPaths iterates over the paths of Blobovniczas with incomplete rebuild files, in no particular order.
+func (b *Blobovniczas) iterateIncompletedRebuildDBPaths(ctx context.Context, f func(string) (bool, error)) error {
+ b.dbFilesGuard.RLock()
+ defer b.dbFilesGuard.RUnlock()
+
+ _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return strings.HasSuffix(path, rebuildSuffix) })
+ return err
+}
+
+func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error {
+ b.dbFilesGuard.RLock()
+ defer b.dbFilesGuard.RUnlock()
+
+ _, err := b.iterateSortedDBPathsInternal(ctx, "", addr, f)
+ return err
+}
+
+func (b *Blobovniczas) iterateSortedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ default:
+ }
+
+ sysPath := filepath.Join(b.rootPath, path)
+ entries, err := os.ReadDir(sysPath)
+ if os.IsNotExist(err) && b.readOnly && path == "" { // non-initialized tree in read-only mode
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ var dbIdxs []uint64
+ var dirIdxs []uint64
+
+ for _, entry := range entries {
+ if strings.HasSuffix(entry.Name(), rebuildSuffix) {
+ continue
+ }
+ idx := u64FromHexString(entry.Name())
+ if entry.IsDir() {
+ dirIdxs = append(dirIdxs, idx)
+ } else {
+ dbIdxs = append(dbIdxs, idx)
+ }
+ }
+
+ if len(dbIdxs) > 0 {
+ for _, dbIdx := range dbIdxs {
+ dbPath := filepath.Join(path, u64ToHexStringExt(dbIdx))
+ stop, err := f(dbPath)
+ if err != nil {
+ return false, err
+ }
+ if stop {
+ return true, nil
+ }
+ }
+ }
+
+ if len(dirIdxs) > 0 {
+ hrw.SortSliceByValue(dirIdxs, addressHash(&addr, path))
+ for _, dirIdx := range dirIdxs {
+ dirPath := filepath.Join(path, u64ToHexString(dirIdx))
+ stop, err := b.iterateSortedDBPathsInternal(ctx, dirPath, addr, f)
+ if err != nil {
+ return false, err
+ }
+ if stop {
+ return true, nil
+ }
+ }
+ }
+
+ return false, nil
}
// makes slice of uint64 values from 0 to number-1.
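
`iterateSortedDBPathsInternal` replaces leaf iteration by index guessing with a filesystem walk: in each directory, database files are visited before subdirectories are entered, and `.rebuild` leftovers are skipped. A dependency-free sketch of that traversal (the real code additionally orders sibling directories by an HRW hash of the object address; plain directory order is used here):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

const rebuildSuffix = ".rebuild"

// walkDBPaths visits database files in a directory before descending into
// subdirectories, mirroring the DFS above. Returning stop=true from f
// aborts the whole walk.
func walkDBPaths(root, rel string, f func(string) (bool, error)) (bool, error) {
	entries, err := os.ReadDir(filepath.Join(root, rel))
	if err != nil {
		return false, err
	}
	var dirs []string
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), rebuildSuffix) {
			continue // skip incomplete rebuild leftovers
		}
		if e.IsDir() {
			dirs = append(dirs, e.Name())
			continue
		}
		if stop, err := f(filepath.Join(rel, e.Name())); err != nil || stop {
			return stop, err
		}
	}
	for _, d := range dirs {
		if stop, err := walkDBPaths(root, filepath.Join(rel, d), f); err != nil || stop {
			return stop, err
		}
	}
	return false, nil
}

func main() {
	_, _ = walkDBPaths(".", "", func(p string) (bool, error) {
		fmt.Println(p)
		return false, nil
	})
}
```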
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
new file mode 100644
index 0000000000..6438f715b9
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
@@ -0,0 +1,336 @@
+package blobovniczatree
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
+)
+
+var errClosingClosedBlobovnicza = errors.New("closing closed blobovnicza is not allowed")
+
+// sharedDB is responsible for opening and closing the file of a single blobovnicza.
+type sharedDB struct {
+ cond *sync.Cond
+ blcza *blobovnicza.Blobovnicza
+ refCount uint32
+
+ openDBCounter *openDBCounter
+ closedFlag *atomic.Bool
+ options []blobovnicza.Option
+ path string
+ readOnly bool
+ metrics blobovnicza.Metrics
+ log *logger.Logger
+}
+
+func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
+ metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
+) *sharedDB {
+ return &sharedDB{
+ cond: &sync.Cond{
+ L: &sync.RWMutex{},
+ },
+ options: options,
+ path: path,
+ readOnly: readOnly,
+ metrics: metrics,
+ closedFlag: closedFlag,
+ log: log,
+ openDBCounter: openDBCounter,
+ }
+}
+
+func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
+ if b.closedFlag.Load() {
+ return nil, errClosed
+ }
+
+ b.cond.L.Lock()
+ defer b.cond.L.Unlock()
+
+ if b.refCount > 0 {
+ b.refCount++
+ return b.blcza, nil
+ }
+
+ blz := blobovnicza.New(append(b.options,
+ blobovnicza.WithReadOnly(b.readOnly),
+ blobovnicza.WithPath(b.path),
+ blobovnicza.WithMetrics(b.metrics),
+ )...)
+
+ if err := blz.Open(ctx); err != nil {
+ return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
+ }
+ if err := blz.Init(ctx); err != nil {
+ return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
+ }
+
+ b.refCount++
+ b.blcza = blz
+ b.openDBCounter.Inc()
+
+ return blz, nil
+}
+
+func (b *sharedDB) Close(ctx context.Context) {
+ b.cond.L.Lock()
+ defer b.cond.L.Unlock()
+
+ if b.refCount == 0 {
+ b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
+ b.cond.Broadcast()
+ return
+ }
+
+ if b.refCount == 1 {
+ b.refCount = 0
+ if err := b.blcza.Close(ctx); err != nil {
+ b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+ zap.String("id", b.path),
+ zap.Error(err),
+ )
+ }
+ b.blcza = nil
+ b.openDBCounter.Dec()
+ return
+ }
+
+ b.refCount--
+ if b.refCount == 1 {
+ b.cond.Broadcast()
+ }
+}
+
+func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
+ b.cond.L.Lock()
+ if b.refCount > 1 {
+ b.cond.Wait()
+ }
+ defer b.cond.L.Unlock()
+
+ if b.refCount == 0 {
+ return errClosingClosedBlobovnicza
+ }
+
+ if err := b.blcza.Close(ctx); err != nil {
+ b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+ zap.String("id", b.path),
+ zap.Error(err),
+ )
+ return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
+ }
+
+ b.refCount = 0
+ b.blcza = nil
+ b.openDBCounter.Dec()
+
+ return os.Remove(b.path)
+}
+
+func (b *sharedDB) SystemPath() string {
+ return b.path
+}
+
+// levelDBManager stores pointers to the sharedDBs for the leaf directory of the blobovnicza tree.
+type levelDBManager struct {
+ dbMtx *sync.RWMutex
+ databases map[uint64]*sharedDB
+
+ options []blobovnicza.Option
+ path string
+ readOnly bool
+ metrics blobovnicza.Metrics
+ openDBCounter *openDBCounter
+ closedFlag *atomic.Bool
+ log *logger.Logger
+}
+
+func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string,
+ readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
+) *levelDBManager {
+ result := &levelDBManager{
+ databases: make(map[uint64]*sharedDB),
+ dbMtx: &sync.RWMutex{},
+
+ options: options,
+ path: filepath.Join(rootPath, lvlPath),
+ readOnly: readOnly,
+ metrics: metrics,
+ openDBCounter: openDBCounter,
+ closedFlag: closedFlag,
+ log: log,
+ }
+ return result
+}
+
+func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
+ res := m.getDBIfExists(idx)
+ if res != nil {
+ return res
+ }
+ return m.getOrCreateDB(idx)
+}
+
+func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB {
+ m.dbMtx.RLock()
+ defer m.dbMtx.RUnlock()
+
+ return m.databases[idx]
+}
+
+func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
+ m.dbMtx.Lock()
+ defer m.dbMtx.Unlock()
+
+ db := m.databases[idx]
+ if db != nil {
+ return db
+ }
+
+ db = newSharedDB(m.options, filepath.Join(m.path, u64ToHexStringExt(idx)), m.readOnly, m.metrics, m.openDBCounter, m.closedFlag, m.log)
+ m.databases[idx] = db
+ return db
+}
+
+func (m *levelDBManager) hasAnyDB() bool {
+ m.dbMtx.RLock()
+ defer m.dbMtx.RUnlock()
+
+ return len(m.databases) > 0
+}
+
+// dbManager manages the opening and closing of blobovnicza instances.
+//
+// The blobovnicza opens at the first request, closes after the last request.
+type dbManager struct {
+ levelToManager map[string]*levelDBManager
+ levelToManagerGuard *sync.RWMutex
+ closedFlag *atomic.Bool
+ dbCounter *openDBCounter
+
+ rootPath string
+ options []blobovnicza.Option
+ readOnly bool
+ metrics blobovnicza.Metrics
+ log *logger.Logger
+}
+
+func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, metrics blobovnicza.Metrics, log *logger.Logger) *dbManager {
+ return &dbManager{
+ rootPath: rootPath,
+ options: options,
+ readOnly: readOnly,
+ metrics: metrics,
+ levelToManager: make(map[string]*levelDBManager),
+ levelToManagerGuard: &sync.RWMutex{},
+ log: log,
+ closedFlag: &atomic.Bool{},
+ dbCounter: newOpenDBCounter(),
+ }
+}
+
+func (m *dbManager) GetByPath(path string) *sharedDB {
+ lvlPath := filepath.Dir(path)
+ curIndex := u64FromHexString(filepath.Base(path))
+ levelManager := m.getLevelManager(lvlPath)
+ return levelManager.GetByIndex(curIndex)
+}
+
+func (m *dbManager) CleanResources(path string) {
+ lvlPath := filepath.Dir(path)
+
+ m.levelToManagerGuard.Lock()
+ defer m.levelToManagerGuard.Unlock()
+
+ if result, ok := m.levelToManager[lvlPath]; ok && !result.hasAnyDB() {
+ delete(m.levelToManager, lvlPath)
+ }
+}
+
+func (m *dbManager) Open() {
+ m.closedFlag.Store(false)
+}
+
+func (m *dbManager) Close() {
+ m.closedFlag.Store(true)
+ m.dbCounter.WaitUntilAllClosed()
+}
+
+func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
+ result := m.getLevelManagerIfExists(lvlPath)
+ if result != nil {
+ return result
+ }
+ return m.getOrCreateLevelManager(lvlPath)
+}
+
+func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager {
+ m.levelToManagerGuard.RLock()
+ defer m.levelToManagerGuard.RUnlock()
+
+ return m.levelToManager[lvlPath]
+}
+
+func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
+ m.levelToManagerGuard.Lock()
+ defer m.levelToManagerGuard.Unlock()
+
+ if result, ok := m.levelToManager[lvlPath]; ok {
+ return result
+ }
+
+ result := newLevelDBManager(m.options, m.rootPath, lvlPath, m.readOnly, m.metrics, m.dbCounter, m.closedFlag, m.log)
+ m.levelToManager[lvlPath] = result
+ return result
+}
+
+type openDBCounter struct {
+ cond *sync.Cond
+ count uint64
+}
+
+func newOpenDBCounter() *openDBCounter {
+ return &openDBCounter{
+ cond: &sync.Cond{
+ L: &sync.Mutex{},
+ },
+ }
+}
+
+func (c *openDBCounter) Inc() {
+ c.cond.L.Lock()
+ defer c.cond.L.Unlock()
+
+ c.count++
+}
+
+func (c *openDBCounter) Dec() {
+ c.cond.L.Lock()
+ defer c.cond.L.Unlock()
+
+ if c.count > 0 {
+ c.count--
+ }
+
+ if c.count == 0 {
+ c.cond.Broadcast()
+ }
+}
+
+func (c *openDBCounter) WaitUntilAllClosed() {
+ c.cond.L.Lock()
+ for c.count > 0 {
+ c.cond.Wait()
+ }
+ c.cond.L.Unlock()
+}
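
`sharedDB` is a reference-counted handle: the first `Open` actually opens the database, later `Open`s only bump the counter, and the underlying handle is released when the last user calls `Close`. A stripped-down sketch of that lifecycle (the real type also uses a `sync.Cond` so `CloseAndRemoveFile` can wait for remaining users):

```go
package main

import (
	"fmt"
	"sync"
)

// sharedResource sketches the sharedDB idea with a *string standing in
// for *blobovnicza.Blobovnicza.
type sharedResource struct {
	mu       sync.Mutex
	refCount int
	handle   *string
}

func (r *sharedResource) Open() *string {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.refCount == 0 {
		h := "opened handle"
		r.handle = &h // the expensive open happens only once
	}
	r.refCount++
	return r.handle
}

func (r *sharedResource) Close() {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.refCount--
	if r.refCount == 0 {
		r.handle = nil // last user: release the underlying handle
	}
}

func main() {
	var r sharedResource
	h1 := r.Open()
	h2 := r.Open() // shared, no second open
	fmt.Println(*h1 == *h2, r.refCount)
	r.Close()
	r.Close()
	fmt.Println(r.handle == nil)
}
```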
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go b/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go
new file mode 100644
index 0000000000..68dc7ff381
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go
@@ -0,0 +1,55 @@
+package blobovniczatree
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+const (
+ rebuildStatusNotStarted = "not_started"
+ rebuildStatusRunning = "running"
+ rebuildStatusCompleted = "completed"
+ rebuildStatusFailed = "failed"
+)
+
+type Metrics interface {
+ Blobovnicza() blobovnicza.Metrics
+
+ SetParentID(parentID string)
+
+ SetMode(mode.ComponentMode)
+ Close()
+
+ SetRebuildStatus(status string)
+ ObjectMoved(d time.Duration)
+ SetRebuildPercent(value uint32)
+ ObjectsCount(d time.Duration, success bool)
+
+ Delete(d time.Duration, success, withStorageID bool)
+ Exists(d time.Duration, success, withStorageID bool)
+ GetRange(d time.Duration, size int, success, withStorageID bool)
+ Get(d time.Duration, size int, success, withStorageID bool)
+ Iterate(d time.Duration, success bool)
+ Put(d time.Duration, size int, success bool)
+}
+
+type noopMetrics struct{}
+
+func (m *noopMetrics) SetParentID(string) {}
+func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) Close() {}
+func (m *noopMetrics) SetRebuildStatus(string) {}
+func (m *noopMetrics) SetRebuildPercent(uint32) {}
+func (m *noopMetrics) ObjectMoved(time.Duration) {}
+func (m *noopMetrics) Delete(time.Duration, bool, bool) {}
+func (m *noopMetrics) Exists(time.Duration, bool, bool) {}
+func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {}
+func (m *noopMetrics) Get(time.Duration, int, bool, bool) {}
+func (m *noopMetrics) Iterate(time.Duration, bool) {}
+func (m *noopMetrics) Put(time.Duration, int, bool) {}
+func (m *noopMetrics) ObjectsCount(time.Duration, bool) {}
+func (m *noopMetrics) Blobovnicza() blobovnicza.Metrics {
+ return &blobovnicza.NoopMetrics{}
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
index 95ef8635a1..5f268b0f28 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
@@ -1,7 +1,9 @@
package blobovniczatree
import (
+ "context"
"io/fs"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
@@ -10,43 +12,66 @@ import (
)
type cfg struct {
- log *logger.Logger
- perm fs.FileMode
- readOnly bool
- rootPath string
- openedCacheSize int
- blzShallowDepth uint64
- blzShallowWidth uint64
- compression *compression.Config
- blzOpts []blobovnicza.Option
- // reportError is the function called when encountering disk errors.
- reportError func(string, error)
+ log *logger.Logger
+ perm fs.FileMode
+ readOnly bool
+ rootPath string
+ openedCacheSize int
+ blzShallowDepth uint64
+ blzShallowWidth uint64
+ compression *compression.Compressor
+ blzOpts []blobovnicza.Option
+ reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors.
+ metrics Metrics
+ waitBeforeDropDB time.Duration
+ blzInitWorkerCount int
+ blzMoveBatchSize int
+ // TTL for the blobovnicza cache
+ openedCacheTTL time.Duration
+ // Interval for deleting expired blobovniczas
+ openedCacheExpInterval time.Duration
}
type Option func(*cfg)
const (
- defaultPerm = 0700
- defaultOpenedCacheSize = 50
- defaultBlzShallowDepth = 2
- defaultBlzShallowWidth = 16
+ defaultPerm = 0o700
+ defaultOpenedCacheSize = 50
+ defaultOpenedCacheTTL = 0 // means expiring is off
+ defaultOpenedCacheInterval = 15 * time.Second
+ defaultBlzShallowDepth = 2
+ defaultBlzShallowWidth = 16
+ defaultWaitBeforeDropDB = 10 * time.Second
+ defaultBlzInitWorkerCount = 5
+ defaultBlzMoveBatchSize = 10000
)
func initConfig(c *cfg) {
*c = cfg{
- log: &logger.Logger{Logger: zap.L()},
- perm: defaultPerm,
- openedCacheSize: defaultOpenedCacheSize,
- blzShallowDepth: defaultBlzShallowDepth,
- blzShallowWidth: defaultBlzShallowWidth,
- reportError: func(string, error) {},
+ log: logger.NewLoggerWrapper(zap.L()),
+ perm: defaultPerm,
+ openedCacheSize: defaultOpenedCacheSize,
+ openedCacheTTL: defaultOpenedCacheTTL,
+ openedCacheExpInterval: defaultOpenedCacheInterval,
+ blzShallowDepth: defaultBlzShallowDepth,
+ blzShallowWidth: defaultBlzShallowWidth,
+ reportError: func(context.Context, string, error) {},
+ metrics: &noopMetrics{},
+ waitBeforeDropDB: defaultWaitBeforeDropDB,
+ blzInitWorkerCount: defaultBlzInitWorkerCount,
+ blzMoveBatchSize:       defaultBlzMoveBatchSize,
}
}
-func WithLogger(l *logger.Logger) Option {
+func WithBlobovniczaTreeLogger(log *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
- c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l))
+ c.log = log
+ }
+}
+
+func WithBlobovniczaLogger(log *logger.Logger) Option {
+ return func(c *cfg) {
+ c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log))
}
}
@@ -86,8 +111,50 @@ func WithOpenedCacheSize(sz int) Option {
}
}
+func WithOpenedCacheTTL(ttl time.Duration) Option {
+ return func(c *cfg) {
+ c.openedCacheTTL = ttl
+ }
+}
+
+func WithOpenedCacheExpInterval(expInterval time.Duration) Option {
+ return func(c *cfg) {
+ c.openedCacheExpInterval = expInterval
+ }
+}
+
func WithObjectSizeLimit(sz uint64) Option {
return func(c *cfg) {
c.blzOpts = append(c.blzOpts, blobovnicza.WithObjectSizeLimit(sz))
}
}
+
+func WithMetrics(m Metrics) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
+
+func WithWaitBeforeDropDB(t time.Duration) Option {
+ return func(c *cfg) {
+ c.waitBeforeDropDB = t
+ }
+}
+
+func WithMoveBatchSize(v int) Option {
+ return func(c *cfg) {
+ c.blzMoveBatchSize = v
+ }
+}
+
+// WithInitWorkerCount sets maximum workers count to init blobovnicza tree.
+//
+// Negative or zero value means no limit.
+func WithInitWorkerCount(v int) Option {
+ if v <= 0 {
+ v = -1
+ }
+ return func(c *cfg) {
+ c.blzInitWorkerCount = v
+ }
+}
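
The configuration above uses the functional-options pattern: `initConfig` establishes defaults and each `Option` mutates the config, with `WithInitWorkerCount` normalizing its argument at construction time. A self-contained sketch with a reduced config:

```go
package main

import (
	"fmt"
	"time"
)

// cfg and Option sketch the pattern used above: initConfig fills in
// defaults, then each Option mutates the config in order.
type cfg struct {
	openedCacheTTL     time.Duration
	blzInitWorkerCount int
}

type Option func(*cfg)

func initConfig(c *cfg) {
	*c = cfg{
		openedCacheTTL:     0, // expiring off by default
		blzInitWorkerCount: 5,
	}
}

func WithOpenedCacheTTL(ttl time.Duration) Option {
	return func(c *cfg) { c.openedCacheTTL = ttl }
}

// WithInitWorkerCount normalizes non-positive values to "no limit" (-1)
// when the option is constructed, matching the diff above.
func WithInitWorkerCount(v int) Option {
	if v <= 0 {
		v = -1
	}
	return func(c *cfg) { c.blzInitWorkerCount = v }
}

func newTree(opts ...Option) *cfg {
	var c cfg
	initConfig(&c)
	for _, opt := range opts {
		opt(&c)
	}
	return &c
}

func main() {
	c := newTree(WithOpenedCacheTTL(time.Minute), WithInitWorkerCount(0))
	fmt.Println(c.openedCacheTTL, c.blzInitWorkerCount) // 1m0s -1
}
```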
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 614d66fe92..37c49d7419 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -1,19 +1,40 @@
package blobovniczatree
import (
+ "context"
"errors"
"path/filepath"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "go.etcd.io/bbolt"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// Put saves object in the maximum weight blobovnicza.
//
// returns an error if the object could not be saved in any blobovnicza.
-func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) {
+func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ var (
+ success bool
+ size int
+ startedAt = time.Now()
+ )
+ defer func() {
+ b.metrics.Put(time.Since(startedAt), size, success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
if b.readOnly {
return common.PutRes{}, common.ErrReadOnly
}
@@ -21,82 +42,77 @@ func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) {
if !prm.DontCompress {
prm.RawData = b.compression.Compress(prm.RawData)
}
+ size = len(prm.RawData)
var putPrm blobovnicza.PutPrm
putPrm.SetAddress(prm.Address)
putPrm.SetMarshaledObject(prm.RawData)
- var (
- fn func(string) (bool, error)
- id *blobovnicza.ID
- allFull = true
- )
-
- fn = func(p string) (bool, error) {
- active, err := b.getActivated(p)
- if err != nil {
- if !isLogical(err) {
- b.reportError("could not get active blobovnicza", err)
- } else {
- b.log.Debug("could not get active blobovnicza",
- zap.String("error", err.Error()))
- }
-
- return false, nil
- }
-
- if _, err := active.blz.Put(putPrm); err != nil {
- // Check if blobovnicza is full. We could either receive `blobovnicza.ErrFull` error
- // or update active blobovnicza in other thread. In the latter case the database will be closed
- // and `updateActive` takes care of not updating the active blobovnicza twice.
- if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
- if isFull {
- b.log.Debug("blobovnicza overflowed",
- zap.String("path", filepath.Join(p, u64ToHexString(active.ind))))
- }
-
- if err := b.updateActive(p, &active.ind); err != nil {
- if !isLogical(err) {
- b.reportError("could not update active blobovnicza", err)
- } else {
- b.log.Debug("could not update active blobovnicza",
- zap.String("level", p),
- zap.String("error", err.Error()))
- }
-
- return false, nil
- }
-
- return fn(p)
- }
-
- allFull = false
- if !isLogical(err) {
- b.reportError("could not put object to active blobovnicza", err)
- } else {
- b.log.Debug("could not put object to active blobovnicza",
- zap.String("path", filepath.Join(p, u64ToHexString(active.ind))),
- zap.String("error", err.Error()))
- }
-
- return false, nil
- }
-
- p = filepath.Join(p, u64ToHexString(active.ind))
-
- id = blobovnicza.NewIDFromBytes([]byte(p))
-
- return true, nil
+ it := &putIterator{
+ B: b,
+ ID: nil,
+ AllFull: true,
+ PutPrm: putPrm,
}
- if err := b.iterateDeepest(prm.Address, fn); err != nil {
+ if err := b.iterateDeepest(ctx, prm.Address, func(s string) (bool, error) { return it.iterate(ctx, s) }); err != nil {
return common.PutRes{}, err
- } else if id == nil {
- if allFull {
+ } else if it.ID == nil {
+ if it.AllFull {
return common.PutRes{}, common.ErrNoSpace
}
return common.PutRes{}, errPutFailed
}
- return common.PutRes{StorageID: id.Bytes()}, nil
+ success = true
+ return common.PutRes{StorageID: it.ID.Bytes()}, nil
+}
+
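+// putIterator carries Put state between iterateDeepest callbacks: ID holds the
+// storage ID of a successful put, and AllFull distinguishes "all blobovniczas
+// are full" from other put failures.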
+type putIterator struct {
+ B *Blobovniczas
+ ID *ID
+ AllFull bool
+ PutPrm blobovnicza.PutPrm
+}
+
+func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
+ active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
+ if err != nil {
+ if !isLogical(err) {
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+ } else {
+ i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
+ zap.Error(err))
+ }
+
+ return false, nil
+ }
+
+ if active == nil {
+ i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+ return false, nil
+ }
+ defer active.Close(ctx)
+
+ i.AllFull = false
+
+ _, err = active.Blobovnicza().Put(ctx, i.PutPrm)
+ if err != nil {
+ if !isLogical(err) {
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+ } else {
+ i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
+ zap.String("path", active.SystemPath()),
+ zap.Error(err))
+ }
+ if errors.Is(err, blobovnicza.ErrNoSpace) {
+ i.AllFull = true
+ }
+ return false, nil
+ }
+
+ idx := u64FromHexString(filepath.Base(active.SystemPath()))
+ i.ID = NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(idx))))
+
+ return true, nil
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
new file mode 100644
index 0000000000..a840275b8c
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -0,0 +1,618 @@
+package blobovniczatree
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
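+// rebuildSuffix marks a blobovnicza that has a rebuild in progress: the marker
+// file is created before objects are moved out and removed only after the move
+// completes, so a leftover marker indicates an interrupted rebuild.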
+const rebuildSuffix = ".rebuild"
+
+var (
+ errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed")
+ errBatchFull = errors.New("batch full")
+)
+
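+// Rebuild migrates objects from blobovniczas that no longer match the current
+// tree schema or fall outside the accepted fill percent range. It first replays
+// any moves interrupted by a previous run, then collects the databases to
+// migrate and moves their objects one database at a time.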
+func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (common.RebuildRes, error) {
+ if b.readOnly {
+ return common.RebuildRes{}, common.ErrReadOnly
+ }
+
+ b.metrics.SetRebuildStatus(rebuildStatusRunning)
+ b.metrics.SetRebuildPercent(0)
+ success := true
+ defer func() {
+ if success {
+ b.metrics.SetRebuildStatus(rebuildStatusCompleted)
+ } else {
+ b.metrics.SetRebuildStatus(rebuildStatusFailed)
+ }
+ }()
+
+ b.rebuildGuard.Lock()
+ defer b.rebuildGuard.Unlock()
+
+ var res common.RebuildRes
+
+ b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
+ completedPreviousMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter)
+ res.ObjectsMoved += completedPreviousMoves
+ if err != nil {
+ b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
+ success = false
+ return res, err
+ }
+ b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
+
+ b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
+ dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
+ if err != nil {
+ b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
+ success = false
+ return res, err
+ }
+
+ b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
+ res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
+ if err != nil {
+ success = false
+ }
+ return res, err
+}
+
+func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
+ var completedDBCount uint32
+ for _, db := range dbs {
+ b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
+ movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter)
+ res.ObjectsMoved += movedObjects
+ if err != nil {
+ b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
+ return res, err
+ }
+ b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
+ res.FilesRemoved++
+ completedDBCount++
+ b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))
+ }
+ b.metrics.SetRebuildPercent(100)
+ return res, nil
+}
+
+func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, fillPercent int) ([]string, error) {
+ withSchemaChange, err := b.selectDBsDoNotMatchSchema(ctx)
+ if err != nil {
+ return nil, err
+ }
+ withFillPercent, err := b.selectDBsDoNotMatchFillPercent(ctx, fillPercent)
+ if err != nil {
+ return nil, err
+ }
+ for k := range withFillPercent {
+ withSchemaChange[k] = struct{}{}
+ }
+ result := make([]string, 0, len(withSchemaChange))
+ for db := range withSchemaChange {
+ result = append(result, db)
+ }
+ return result, nil
+}
+
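+// selectDBsDoNotMatchSchema returns databases that exist on disk but are not
+// reachable with the current tree depth and width: it collects all existing
+// paths and then removes those produced by iterating the current schema.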
+func (b *Blobovniczas) selectDBsDoNotMatchSchema(ctx context.Context) (map[string]struct{}, error) {
+ dbsToMigrate := make(map[string]struct{})
+ if err := b.iterateExistingDBPaths(ctx, func(s string) (bool, error) {
+ dbsToMigrate[s] = struct{}{}
+ return false, nil
+ }); err != nil {
+ return nil, err
+ }
+ if err := b.iterateSortedLeaves(ctx, nil, func(s string) (bool, error) {
+ delete(dbsToMigrate, s)
+ return false, nil
+ }); err != nil {
+ return nil, err
+ }
+ return dbsToMigrate, nil
+}
+
+func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, target int) (map[string]struct{}, error) {
+ if target <= 0 || target > 100 {
+ return nil, fmt.Errorf("invalid fill percent value %d: must be (0; 100]", target)
+ }
+ result := make(map[string]struct{})
+ if err := b.iterateDeepest(ctx, oid.Address{}, func(lvlPath string) (bool, error) {
+ dir := filepath.Join(b.rootPath, lvlPath)
+ entries, err := os.ReadDir(dir)
+ if os.IsNotExist(err) { // tree is not initialized yet
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ hasDBs := false
+ // the DB with maxIdx may be the active one, so it must not be rebuilt
+ var maxIdx uint64
+ for _, e := range entries {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
+ continue
+ }
+ hasDBs = true
+ maxIdx = max(u64FromHexString(e.Name()), maxIdx)
+ }
+ if !hasDBs {
+ return false, nil
+ }
+ for _, e := range entries {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
+ continue
+ }
+ if u64FromHexString(e.Name()) == maxIdx {
+ continue
+ }
+ path := filepath.Join(lvlPath, e.Name())
+ resettlementRequired, err := b.rebuildBySize(ctx, path, target)
+ if err != nil {
+ return false, err
+ }
+ if resettlementRequired {
+ result[path] = struct{}{}
+ }
+ }
+ return false, nil
+ }); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) {
+ shDB := b.getBlobovnicza(ctx, path)
+ blz, err := shDB.Open(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer shDB.Close(ctx)
+ fp := blz.FillPercent()
+ // the accepted fill percent range is defined as
+ // |----|+++++++++++++++++|+++++++++++++++++|---------------
+ // 0%   target            100%              100+(100-target)
+ // where `+` marks an accepted fill percent and `-` a fill percent that requires rebuild
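+ // e.g. with targetFillPercent = 80 a database is rebuilt when its fill
+ // percent is below 80% or above 120%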
+ return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
+}
+
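+// rebuildDB moves all objects out of the given database and drops it. A rebuild
+// temp file is created first and removed only after the database has been
+// dropped, so an interrupted run can be detected and completed later.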
+func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) {
+ shDB := b.getBlobovnicza(ctx, path)
+ blz, err := shDB.Open(ctx)
+ if err != nil {
+ return 0, err
+ }
+ shDBClosed := false
+ defer func() {
+ if shDBClosed {
+ return
+ }
+ shDB.Close(ctx)
+ }()
+ dropTempFile, err := b.addRebuildTempFile(ctx, path)
+ if err != nil {
+ return 0, err
+ }
+ migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter)
+ if err != nil {
+ return migratedObjects, err
+ }
+ shDBClosed, err = b.dropDB(ctx, path, shDB)
+ if err == nil {
+ // drop the temp file only on success, so an interrupted rebuild can be continued after an error
+ dropTempFile()
+ }
+ return migratedObjects, err
+}
+
+func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
+ sysPath := filepath.Join(b.rootPath, path)
+ sysPath += rebuildSuffix
+ _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
+ if err != nil {
+ return nil, err
+ }
+ return func() {
+ if err := os.Remove(sysPath); err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ }
+ }, nil
+}
+
+func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) {
+ var result atomic.Uint64
+ batch := make(map[oid.Address][]byte)
+
+ var prm blobovnicza.IteratePrm
+ prm.DecodeAddresses()
+ prm.SetHandler(func(ie blobovnicza.IterationElement) error {
+ batch[ie.Address()] = bytes.Clone(ie.ObjectData())
+ if len(batch) == b.blzMoveBatchSize {
+ return errBatchFull
+ }
+ return nil
+ })
+
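+ // Drain the source database in batches: Iterate fills the batch until
+ // errBatchFull is returned, then the batch is moved concurrently and the
+ // loop repeats until no objects are left.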
+ for {
+ release, err := limiter.ReadRequest(ctx)
+ if err != nil {
+ return result.Load(), err
+ }
+ _, err = blz.Iterate(ctx, prm)
+ release()
+ if err != nil && !errors.Is(err, errBatchFull) {
+ return result.Load(), err
+ }
+
+ if len(batch) == 0 {
+ break
+ }
+
+ eg, egCtx := errgroup.WithContext(ctx)
+
+ for addr, data := range batch {
+ release, err := limiter.AcquireWorkSlot(egCtx)
+ if err != nil {
+ _ = eg.Wait()
+ return result.Load(), err
+ }
+ eg.Go(func() error {
+ defer release()
+ moveRelease, err := limiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ err = b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+ moveRelease()
+ if err == nil {
+ result.Add(1)
+ }
+ return err
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return result.Load(), err
+ }
+
+ batch = make(map[oid.Address][]byte)
+ }
+
+ return result.Load(), nil
+}
+
+func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
+ addr oid.Address, data []byte, metaStore common.MetaStorage,
+) error {
+ startedAt := time.Now()
+ defer func() {
+ b.metrics.ObjectMoved(time.Since(startedAt))
+ }()
+ it := &moveIterator{
+ B: b,
+ ID: nil,
+ AllFull: true,
+ Address: addr,
+ ObjectData: data,
+ MetaStore: metaStore,
+ Source: source,
+ SourceSysPath: sourcePath,
+ }
+
+ if err := b.iterateDeepest(ctx, addr, func(lvlPath string) (bool, error) { return it.tryMoveToLvl(ctx, lvlPath) }); err != nil {
+ return err
+ } else if it.ID == nil {
+ if it.AllFull {
+ return common.ErrNoSpace
+ }
+ return errPutFailed
+ }
+ return nil
+}
+
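+// dropDB removes an emptied blobovnicza from disk. It waits waitBeforeDropDB to
+// let requests that still use the old storage ID finish, evicts the database
+// from the cache and removes its file together with any emptied parent
+// directories.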
+func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ case <-time.After(b.waitBeforeDropDB): // give requests that still use the old storage ID time to complete
+ }
+
+ b.dbCache.EvictAndMarkNonCached(path)
+ defer b.dbCache.RemoveFromNonCached(path)
+
+ b.dbFilesGuard.Lock()
+ defer b.dbFilesGuard.Unlock()
+
+ if err := shDB.CloseAndRemoveFile(ctx); err != nil {
+ return false, err
+ }
+ b.commondbManager.CleanResources(path)
+ if err := b.dropDirectoryIfEmpty(filepath.Dir(path)); err != nil {
+ return true, err
+ }
+ return true, nil
+}
+
+func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
+ if path == "." {
+ return nil
+ }
+
+ sysPath := filepath.Join(b.rootPath, path)
+ entries, err := os.ReadDir(sysPath)
+ if err != nil {
+ return err
+ }
+ if len(entries) > 0 {
+ return nil
+ }
+ if err := os.Remove(sysPath); err != nil {
+ return err
+ }
+ return b.dropDirectoryIfEmpty(filepath.Dir(path))
+}
+
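+// completeIncompletedMove replays moves recorded in databases that still have a
+// rebuild temp file: every stored MoveInfo is applied via performMove, and the
+// temp files are removed afterwards.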
+func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) {
+ var count uint64
+ var rebuildTempFilesToRemove []string
+ err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
+ rebuildTmpFilePath := s
+ s = strings.TrimSuffix(s, rebuildSuffix)
+ shDB := b.getBlobovnicza(ctx, s)
+ blz, err := shDB.Open(ctx)
+ if err != nil {
+ return true, err
+ }
+ defer shDB.Close(ctx)
+
+ release, err := rateLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ incompletedMoves, err := blz.ListMoveInfo(ctx)
+ release()
+ if err != nil {
+ return true, err
+ }
+
+ for _, move := range incompletedMoves {
+ release, err := rateLimiter.WriteRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore)
+ release()
+ if err != nil {
+ return true, err
+ }
+ count++
+ }
+
+ rebuildTempFilesToRemove = append(rebuildTempFilesToRemove, rebuildTmpFilePath)
+ return false, nil
+ })
+ for _, tmp := range rebuildTempFilesToRemove {
+ release, err := rateLimiter.WriteRequest(ctx)
+ if err != nil {
+ return count, err
+ }
+ if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ }
+ release()
+ }
+ return count, err
+}
+
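+// performMove finishes a single recorded move so that it is safe to replay: the
+// object is put to the target only if it is not already there, the storage ID
+// is updated in the meta storage, and only then are the object and its move
+// info removed from the source.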
+func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
+ move blobovnicza.MoveInfo, metaStore common.MetaStorage,
+) error {
+ targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path())
+ target, err := targetDB.Open(ctx)
+ if err != nil {
+ return err
+ }
+ defer targetDB.Close(ctx)
+
+ existsInSource := true
+ var gPrm blobovnicza.GetPrm
+ gPrm.SetAddress(move.Address)
+ gRes, err := source.Get(ctx, gPrm)
+ if err != nil {
+ if client.IsErrObjectNotFound(err) {
+ existsInSource = false
+ } else {
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+ return err
+ }
+ }
+
+ if !existsInSource { // the object was already deleted by Rebuild, only the move info has to be dropped
+ if err = source.DropMoveInfo(ctx, move.Address); err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+ return err
+ }
+ b.deleteProtectedObjects.Delete(move.Address)
+ return nil
+ }
+
+ existsInTarget, err := target.Exists(ctx, move.Address)
+ if err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+ return err
+ }
+
+ if !existsInTarget {
+ var putPrm blobovnicza.PutPrm
+ putPrm.SetAddress(move.Address)
+ putPrm.SetMarshaledObject(gRes.Object())
+ _, err = target.Put(ctx, putPrm)
+ if err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
+ return err
+ }
+ }
+
+ if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
+ return err
+ }
+
+ var deletePrm blobovnicza.DeletePrm
+ deletePrm.SetAddress(move.Address)
+ if _, err = source.Delete(ctx, deletePrm); err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
+ return err
+ }
+
+ if err = source.DropMoveInfo(ctx, move.Address); err != nil {
+ b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+ return err
+ }
+
+ b.deleteProtectedObjects.Delete(move.Address)
+ return nil
+}
+
+type moveIterator struct {
+ B *Blobovniczas
+ ID *ID
+ AllFull bool
+ Address oid.Address
+ ObjectData []byte
+ MetaStore common.MetaStorage
+ Source *blobovnicza.Blobovnicza
+ SourceSysPath string
+}
+
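+// tryMoveToLvl moves the object to the active blobovnicza of the given level.
+// MoveInfo is written to the source before the object is copied, so a crash at
+// any later step can be recovered by completeIncompletedMove; the address stays
+// delete-protected for the duration of the move.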
+func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) {
+ target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
+ if err != nil {
+ if !isLogical(err) {
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+ } else {
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
+ }
+ return false, nil
+ }
+
+ if target == nil {
+ i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+ return false, nil
+ }
+ defer target.Close(ctx)
+
+ i.AllFull = false
+
+ targetIDx := u64FromHexString(filepath.Base(target.SystemPath()))
+ targetStorageID := NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(targetIDx))))
+
+ if err = i.Source.PutMoveInfo(ctx, blobovnicza.MoveInfo{
+ Address: i.Address,
+ TargetStorageID: targetStorageID.Bytes(),
+ }); err != nil {
+ if !isLogical(err) {
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
+ } else {
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
+ }
+ return true, nil
+ }
+ i.B.deleteProtectedObjects.Add(i.Address)
+
+ var putPrm blobovnicza.PutPrm
+ putPrm.SetAddress(i.Address)
+ putPrm.SetMarshaledObject(i.ObjectData)
+ putPrm.SetForce(true)
+
+ _, err = target.Blobovnicza().Put(ctx, putPrm)
+ if err != nil {
+ if !isLogical(err) {
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+ } else {
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
+ }
+ return true, nil
+ }
+
+ if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
+ return true, nil
+ }
+
+ var deletePrm blobovnicza.DeletePrm
+ deletePrm.SetAddress(i.Address)
+ if _, err = i.Source.Delete(ctx, deletePrm); err != nil {
+ if !isLogical(err) {
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err)
+ } else {
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
+ }
+ return true, nil
+ }
+
+ if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil {
+ if !isLogical(err) {
+ i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err)
+ } else {
+ i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
+ }
+ return true, nil
+ }
+ i.B.deleteProtectedObjects.Delete(i.Address)
+
+ i.ID = targetStorageID
+ return true, nil
+}
+
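+// addressMap is a mutex-guarded set of object addresses that are currently
+// protected from deletion while being moved between blobovniczas.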
+type addressMap struct {
+ data map[oid.Address]struct{}
+ guard *sync.RWMutex
+}
+
+func newAddressMap() *addressMap {
+ return &addressMap{
+ data: make(map[oid.Address]struct{}),
+ guard: &sync.RWMutex{},
+ }
+}
+
+func (m *addressMap) Add(address oid.Address) {
+ m.guard.Lock()
+ defer m.guard.Unlock()
+
+ m.data[address] = struct{}{}
+}
+
+func (m *addressMap) Delete(address oid.Address) {
+ m.guard.Lock()
+ defer m.guard.Unlock()
+
+ delete(m.data, address)
+}
+
+func (m *addressMap) Contains(address oid.Address) bool {
+ m.guard.RLock()
+ defer m.guard.RUnlock()
+
+ _, contains := m.data[address]
+ return contains
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
new file mode 100644
index 0000000000..4146ef2606
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -0,0 +1,213 @@
+package blobovniczatree
+
+import (
+ "bytes"
+ "context"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
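+// TestRebuildFailover checks that an interrupted rebuild is completed on the
+// next run regardless of the step it crashed at: right after the move info was
+// saved, after the object was copied to the target, or after it was deleted
+// from the source.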
+func TestRebuildFailover(t *testing.T) {
+ t.Parallel()
+
+ t.Run("only move info saved", testRebuildFailoverOnlyMoveInfoSaved)
+
+ t.Run("object saved to target", testRebuildFailoverObjectSavedToTarget)
+
+ t.Run("object deleted from source", testRebuildFailoverObjectDeletedFromSource)
+}
+
+func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
+ t.Parallel()
+ dir := t.TempDir()
+
+ blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
+
+ obj := blobstortest.NewObject(1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+
+ var pPrm blobovnicza.PutPrm
+ pPrm.SetAddress(object.AddressOf(obj))
+ pPrm.SetMarshaledObject(data)
+ _, err = blz.Put(context.Background(), pPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{
+ Address: object.AddressOf(obj),
+ TargetStorageID: []byte("0/0/0"),
+ }))
+
+ require.NoError(t, blz.Close(context.Background()))
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
+
+ testRebuildFailoverValidate(t, dir, obj, true)
+}
+
+func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
+ t.Parallel()
+ dir := t.TempDir()
+
+ blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
+
+ obj := blobstortest.NewObject(1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+
+ var pPrm blobovnicza.PutPrm
+ pPrm.SetAddress(object.AddressOf(obj))
+ pPrm.SetMarshaledObject(data)
+ _, err = blz.Put(context.Background(), pPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{
+ Address: object.AddressOf(obj),
+ TargetStorageID: []byte("0/0/0"),
+ }))
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
+
+ blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
+
+ _, err = blz.Put(context.Background(), pPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ testRebuildFailoverValidate(t, dir, obj, true)
+}
+
+func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
+ t.Parallel()
+ dir := t.TempDir()
+
+ blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
+
+ obj := blobstortest.NewObject(1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+
+ require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{
+ Address: object.AddressOf(obj),
+ TargetStorageID: []byte("0/0/0"),
+ }))
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
+
+ blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
+
+ var pPrm blobovnicza.PutPrm
+ pPrm.SetAddress(object.AddressOf(obj))
+ pPrm.SetMarshaledObject(data)
+ _, err = blz.Put(context.Background(), pPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ testRebuildFailoverValidate(t, dir, obj, false)
+}
+
+func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) {
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(2048),
+ WithBlobovniczaShallowWidth(2),
+ WithBlobovniczaShallowDepth(2),
+ WithRootPath(dir),
+ WithBlobovniczaSize(10*1024),
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ var dPrm common.DeletePrm
+ dPrm.Address = object.AddressOf(obj)
+ dPrm.StorageID = []byte("0/0/1")
+ _, err := b.Delete(context.Background(), dPrm)
+ require.ErrorIs(t, err, errObjectIsDeleteProtected)
+
+ metaStub := &storageIDUpdateStub{
+ storageIDs: make(map[oid.Address][]byte),
+ guard: &sync.Mutex{},
+ }
+ limiter := &rebuildLimiterStub{}
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 1,
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), rRes.ObjectsMoved)
+ require.Equal(t, uint64(0), rRes.FilesRemoved)
+
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
+
+ blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
+
+ moveInfo, err := blz.ListMoveInfo(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 0, len(moveInfo))
+
+ var gPrm blobovnicza.GetPrm
+ gPrm.SetAddress(object.AddressOf(obj))
+ _, err = blz.Get(context.Background(), gPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
+ require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Init(context.Background()))
+
+ moveInfo, err = blz.ListMoveInfo(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 0, len(moveInfo))
+
+ gRes, err := blz.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ require.True(t, len(gRes.Object()) > 0)
+
+ if mustUpdateStorageID {
+ require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)]))
+ }
+
+ require.NoError(t, blz.Close(context.Background()))
+
+ _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
+ require.True(t, os.IsNotExist(err))
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
new file mode 100644
index 0000000000..a7a99fec36
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -0,0 +1,520 @@
+package blobovniczatree
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestBlobovniczaTreeSchemaRebuild(t *testing.T) {
+ t.Parallel()
+
+ t.Run("width increased", func(t *testing.T) {
+ t.Parallel()
+ testBlobovniczaTreeRebuildHelper(t, 2, 2, 2, 3, false)
+ })
+
+ t.Run("width reduced", func(t *testing.T) {
+ t.Parallel()
+ testBlobovniczaTreeRebuildHelper(t, 2, 2, 2, 1, true)
+ })
+
+ t.Run("depth increased", func(t *testing.T) {
+ t.Parallel()
+ testBlobovniczaTreeRebuildHelper(t, 1, 2, 2, 2, true)
+ })
+
+ t.Run("depth reduced", func(t *testing.T) {
+ t.Parallel()
+ testBlobovniczaTreeRebuildHelper(t, 2, 2, 1, 2, true)
+ })
+}
+
+func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
+ t.Parallel()
+
+ t.Run("no rebuild by fill percent", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ for range 100 {
+ obj := blobstortest.NewObject(64 * 1024) // 64KB object
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ limiter := &rebuildLimiterStub{}
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 60,
+ })
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.False(t, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
+ })
+
+ t.Run("no rebuild single db", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB soft limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ obj := blobstortest.NewObject(64 * 1024) // 64KB object
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ limiter := &rebuildLimiterStub{}
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 90, // the only DB is 64% full (64KB / 100KB) but is skipped as the active one
+ })
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.False(t, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
+ })
+
+ t.Run("rebuild by fill percent", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ toDelete := make(map[oid.Address][]byte)
+ for i := range 100 { // two objects per blobovnicza, so 50 DBs will be created in total
+ obj := blobstortest.NewObject(64 * 1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ if i%2 == 1 {
+ toDelete[prm.Address] = res.StorageID
+ }
+ }
+ for addr, storageID := range toDelete {
+ var prm common.DeletePrm
+ prm.Address = addr
+ prm.StorageID = storageID
+ _, err := b.Delete(context.Background(), prm)
+ require.NoError(t, err)
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ limiter := &rebuildLimiterStub{}
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 80,
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(49), rRes.FilesRemoved)
+ require.Equal(t, uint64(49), rRes.ObjectsMoved) // 49 DBs with 1 object each
+ require.Equal(t, uint64(49), metaStub.updatedCount)
+
+ for addr, storageID := range storageIDs {
+ if _, found := toDelete[addr]; found {
+ continue
+ }
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
+ })
+
+ t.Run("rebuild by overflow", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ for range 100 { // two objects per blobovnicza, so 50 DBs will be created in total
+ obj := blobstortest.NewObject(64 * 1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ require.NoError(t, b.Close(context.Background()))
+ b = NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1),
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(50*1024), // 50 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ limiter := &rebuildLimiterStub{}
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ Limiter: limiter,
+ FillPercent: 80,
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(49), rRes.FilesRemoved)
+ require.Equal(t, uint64(98), rRes.ObjectsMoved) // 49 DBs with 2 objects each
+ require.Equal(t, uint64(98), metaStub.updatedCount)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
+ })
+}
+
+func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024), // 64KB object size limit
+ WithBlobovniczaShallowWidth(5),
+ WithBlobovniczaShallowDepth(2), // depth = 2
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024),
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ obj := blobstortest.NewObject(64 * 1024) // 64KB object
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ storageIDs := make(map[oid.Address][]byte)
+ storageIDs[prm.Address] = res.StorageID
+
+ require.NoError(t, b.Close(context.Background()))
+
+ b = NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(32*1024), // 32KB object size limit
+ WithBlobovniczaShallowWidth(5),
+ WithBlobovniczaShallowDepth(3), // depth = 3
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024),
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ limiter := &rebuildLimiterStub{}
+ var rPrm common.RebuildPrm
+ rPrm.MetaStorage = metaStub
+ rPrm.Limiter = limiter
+ rPrm.FillPercent = 1
+ rRes, err := b.Rebuild(context.Background(), rPrm)
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.True(t, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
+}
+
+func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(2048),
+ WithBlobovniczaShallowWidth(sourceWidth),
+ WithBlobovniczaShallowDepth(sourceDepth),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024),
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ eg, egCtx := errgroup.WithContext(context.Background())
+ storageIDs := make(map[oid.Address][]byte)
+ storageIDsGuard := &sync.Mutex{}
+ for range 100 {
+ eg.Go(func() error {
+ obj := blobstortest.NewObject(1024)
+ data, err := obj.Marshal()
+ if err != nil {
+ return err
+ }
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(egCtx, prm)
+ if err != nil {
+ return err
+ }
+ storageIDsGuard.Lock()
+ storageIDs[prm.Address] = res.StorageID
+ storageIDsGuard.Unlock()
+ return nil
+ })
+ }
+
+ require.NoError(t, eg.Wait())
+ require.NoError(t, b.Close(context.Background()))
+
+ b = NewBlobovniczaTree(
+ context.Background(),
+ WithBlobovniczaLogger(test.NewLogger(t)),
+ WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(2048),
+ WithBlobovniczaShallowWidth(targetWidth),
+ WithBlobovniczaShallowDepth(targetDepth),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024),
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(50))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ limiter := &rebuildLimiterStub{}
+ var rPrm common.RebuildPrm
+ rPrm.MetaStorage = metaStub
+ rPrm.Limiter = limiter
+ rPrm.FillPercent = 1
+ rRes, err := b.Rebuild(context.Background(), rPrm)
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.Equal(t, shouldMigrate, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close(context.Background()))
+ require.NoError(t, limiter.ValidateReleased())
+}
+
+type storageIDUpdateStub struct {
+ guard *sync.Mutex
+ storageIDs map[oid.Address][]byte
+ updatedCount uint64
+}
+
+func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
+ s.guard.Lock()
+ defer s.guard.Unlock()
+
+ s.storageIDs[addr] = storageID
+ s.updatedCount++
+ return nil
+}
+
+type rebuildLimiterStub struct {
+ slots atomic.Int64
+ readRequests atomic.Int64
+ writeRequests atomic.Int64
+}
+
+func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) {
+ s.slots.Add(1)
+ return func() { s.slots.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) {
+ s.readRequests.Add(1)
+ return func() { s.readRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) {
+ s.writeRequests.Add(1)
+ return func() { s.writeRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ValidateReleased() error {
+ if v := s.slots.Load(); v != 0 {
+ return fmt.Errorf("invalid slots value %d", v)
+ }
+ if v := s.readRequests.Load(); v != 0 {
+ return fmt.Errorf("invalid read requests value %d", v)
+ }
+ if v := s.writeRequests.Load(); v != 0 {
+ return fmt.Errorf("invalid write requests value %d", v)
+ }
+ return nil
+}
diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go
index a6fe9935ed..ceaf2538a4 100644
--- a/pkg/local_object_storage/blobstor/blobstor.go
+++ b/pkg/local_object_storage/blobstor/blobstor.go
@@ -1,6 +1,7 @@
package blobstor
import (
+ "context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -40,18 +41,21 @@ type SubStorageInfo struct {
type Option func(*cfg)
type cfg struct {
- compression compression.Config
+ compression compression.Compressor
log *logger.Logger
storage []SubStorage
+ metrics Metrics
}
func initConfig(c *cfg) {
- c.log = &logger.Logger{Logger: zap.L()}
+ c.log = logger.NewLoggerWrapper(zap.L())
+ c.metrics = &noopMetrics{}
}
// New creates, initializes and returns new BlobStor instance.
func New(opts ...Option) *BlobStor {
bs := new(BlobStor)
+ bs.mode = mode.Disabled
initConfig(&bs.cfg)
for i := range opts {
@@ -70,6 +74,13 @@ func (b *BlobStor) SetLogger(l *logger.Logger) {
b.log = l
}
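+// SetParentID sets the parent ID reported by the metrics of BlobStor and all of
+// its substorages.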
+func (b *BlobStor) SetParentID(parentID string) {
+ b.metrics.SetParentID(parentID)
+ for _, ss := range b.storage {
+ ss.Storage.SetParentID(parentID)
+ }
+}
+
// WithStorages provides sub-blobstors.
func WithStorages(st []SubStorage) Option {
return func(c *cfg) {
@@ -80,36 +91,30 @@ func WithStorages(st []SubStorage) Option {
// WithLogger returns option to specify BlobStor's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
+ c.log = l
}
}
-// WithCompressObjects returns option to toggle
-// compression of the stored objects.
-//
-// If true, Zstandard algorithm is used for data compression.
-//
-// If compressor (decompressor) creation failed,
-// the uncompressed option will be used, and the error
-// is recorded in the provided log.
-func WithCompressObjects(comp bool) Option {
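+// WithCompression returns option to set compression settings.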
+func WithCompression(comp compression.Config) Option {
return func(c *cfg) {
- c.compression.Enabled = comp
- }
-}
-
-// WithUncompressableContentTypes returns option to disable decompression
-// for specific content types as seen by object.AttributeContentType attribute.
-func WithUncompressableContentTypes(values []string) Option {
- return func(c *cfg) {
- c.compression.UncompressableContentTypes = values
+ c.compression.Config = comp
}
}
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
-func (b *BlobStor) SetReportErrorFunc(f func(string, error)) {
+func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) {
for i := range b.storage {
b.storage[i].Storage.SetReportErrorFunc(f)
}
}
+
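+// WithMetrics returns option to specify BlobStor's metrics.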
+func WithMetrics(m Metrics) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
+
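+// Compressor returns the compressor used by BlobStor.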
+func (b *BlobStor) Compressor() *compression.Compressor {
+ return &b.compression
+}
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index e6d680cb72..6ddeb6f008 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -1,40 +1,49 @@
package blobstor
import (
- "os"
+ "context"
"path/filepath"
+ "sync"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
-const blobovniczaDir = "blobovniczas"
-
-func defaultStorages(p string, smallSizeLimit uint64) []SubStorage {
+func defaultTestStorages(p string, smallSizeLimit uint64) ([]SubStorage, *teststore.TestStore, *teststore.TestStore) {
+ smallFileStorage := teststore.New(teststore.WithSubstorage(blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
+ blobovniczatree.WithRootPath(filepath.Join(p, "blobovniczas")),
+ blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init
+ ))
+ largeFileStorage := teststore.New(teststore.WithSubstorage(fstree.New(fstree.WithPath(p))))
return []SubStorage{
{
- Storage: blobovniczatree.NewBlobovniczaTree(
- blobovniczatree.WithRootPath(filepath.Join(p, "blobovniczas")),
- blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init
+ Storage: smallFileStorage,
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) <= smallSizeLimit
},
},
{
- Storage: fstree.New(fstree.WithPath(p)),
+ Storage: largeFileStorage,
},
- }
+ }, smallFileStorage, largeFileStorage
+}
+
+func defaultStorages(p string, smallSizeLimit uint64) []SubStorage {
+ storages, _, _ := defaultTestStorages(p, smallSizeLimit)
+ return storages
}
func TestCompression(t *testing.T) {
- dir, err := os.MkdirTemp("", "frostfs*")
- require.NoError(t, err)
- t.Cleanup(func() { _ = os.RemoveAll(dir) })
+ dir := t.TempDir()
const (
smallSizeLimit = 512
@@ -43,26 +52,28 @@ func TestCompression(t *testing.T) {
newBlobStor := func(t *testing.T, compress bool) *BlobStor {
bs := New(
- WithCompressObjects(compress),
+ WithCompression(compression.Config{
+ Enabled: compress,
+ }),
WithStorages(defaultStorages(dir, smallSizeLimit)))
- require.NoError(t, bs.Open(false))
- require.NoError(t, bs.Init())
+ require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, bs.Init(context.Background()))
return bs
}
bigObj := make([]*objectSDK.Object, objCount)
smallObj := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
bigObj[i] = testObject(smallSizeLimit * 2)
smallObj[i] = testObject(smallSizeLimit / 2)
}
testGet := func(t *testing.T, b *BlobStor, i int) {
- res1, err := b.Get(common.GetPrm{Address: object.AddressOf(smallObj[i])})
+ res1, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(smallObj[i])})
require.NoError(t, err)
require.Equal(t, smallObj[i], res1.Object)
- res2, err := b.Get(common.GetPrm{Address: object.AddressOf(bigObj[i])})
+ res2, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(bigObj[i])})
require.NoError(t, err)
require.Equal(t, bigObj[i], res2.Object)
}
@@ -70,12 +81,12 @@ func TestCompression(t *testing.T) {
testPut := func(t *testing.T, b *BlobStor, i int) {
var prm common.PutPrm
prm.Object = smallObj[i]
- _, err = b.Put(prm)
+ _, err := b.Put(context.Background(), prm)
require.NoError(t, err)
prm = common.PutPrm{}
prm.Object = bigObj[i]
- _, err = b.Put(prm)
+ _, err = b.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -83,35 +94,36 @@ func TestCompression(t *testing.T) {
blobStor := newBlobStor(t, false)
testPut(t, blobStor, 0)
testGet(t, blobStor, 0)
- require.NoError(t, blobStor.Close())
+ require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, true)
testGet(t, blobStor, 0) // get uncompressed object with compress enabled
testPut(t, blobStor, 1)
testGet(t, blobStor, 1)
- require.NoError(t, blobStor.Close())
+ require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, false)
testGet(t, blobStor, 0) // get old uncompressed object
testGet(t, blobStor, 1) // get compressed object with compression disabled
testPut(t, blobStor, 2)
testGet(t, blobStor, 2)
- require.NoError(t, blobStor.Close())
+ require.NoError(t, blobStor.Close(context.Background()))
}
func TestBlobstor_needsCompression(t *testing.T) {
const smallSizeLimit = 512
newBlobStor := func(t *testing.T, compress bool, ct ...string) *BlobStor {
- dir, err := os.MkdirTemp("", "frostfs*")
- require.NoError(t, err)
- t.Cleanup(func() { _ = os.RemoveAll(dir) })
+ dir := t.TempDir()
bs := New(
- WithCompressObjects(compress),
- WithUncompressableContentTypes(ct),
+ WithCompression(compression.Config{
+ Enabled: compress,
+ UncompressableContentTypes: ct,
+ }),
WithStorages([]SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
blobovniczatree.WithRootPath(filepath.Join(dir, "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init
Policy: func(_ *objectSDK.Object, data []byte) bool {
@@ -122,8 +134,8 @@ func TestBlobstor_needsCompression(t *testing.T) {
Storage: fstree.New(fstree.WithPath(dir)),
},
}))
- require.NoError(t, bs.Open(false))
- require.NoError(t, bs.Init())
+ require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, bs.Init(context.Background()))
return bs
}
@@ -142,32 +154,185 @@ func TestBlobstor_needsCompression(t *testing.T) {
b := newBlobStor(t, true, "audio/*", "*/x-mpeg", "*/mpeg", "application/x-midi")
obj := newObjectWithCt("video/mpeg")
- require.False(t, b.NeedsCompression(obj))
+ require.False(t, b.compression.NeedsCompression(obj))
obj = newObjectWithCt("audio/aiff")
- require.False(t, b.NeedsCompression(obj))
+ require.False(t, b.compression.NeedsCompression(obj))
obj = newObjectWithCt("application/x-midi")
- require.False(t, b.NeedsCompression(obj))
+ require.False(t, b.compression.NeedsCompression(obj))
obj = newObjectWithCt("text/plain")
- require.True(t, b.NeedsCompression(obj))
+ require.True(t, b.compression.NeedsCompression(obj))
obj = newObjectWithCt("")
- require.True(t, b.NeedsCompression(obj))
+ require.True(t, b.compression.NeedsCompression(obj))
})
t.Run("content-types omitted", func(t *testing.T) {
b := newBlobStor(t, true)
obj := newObjectWithCt("video/mpeg")
- require.True(t, b.NeedsCompression(obj))
+ require.True(t, b.compression.NeedsCompression(obj))
})
t.Run("compress disabled", func(t *testing.T) {
b := newBlobStor(t, false, "video/mpeg")
obj := newObjectWithCt("video/mpeg")
- require.False(t, b.NeedsCompression(obj))
+ require.False(t, b.compression.NeedsCompression(obj))
obj = newObjectWithCt("text/plain")
- require.False(t, b.NeedsCompression(obj))
+ require.False(t, b.compression.NeedsCompression(obj))
+ })
+}
+
+func TestConcurrentPut(t *testing.T) {
+ dir := t.TempDir()
+
+ const (
+ smallSizeLimit = 512
+
+ // concurrentPutCount is fstree implementation specific
+ concurrentPutCount = 5
+ )
+
+ blobStor := New(
+ WithStorages(defaultStorages(dir, smallSizeLimit)))
+ require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, blobStor.Init(context.Background()))
+
+ testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
+ res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)})
+ require.NoError(t, err)
+ require.Equal(t, obj, res.Object)
+ }
+
+ testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
+ var prm common.PutPrm
+ prm.Object = obj
+ _, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ }
+
+ testPutFileExistsError := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
+ var prm common.PutPrm
+ prm.Object = obj
+ if _, err := b.Put(context.Background(), prm); err != nil {
+ require.ErrorContains(t, err, "file exists")
+ }
+ }
+
+ t.Run("put the same big object", func(t *testing.T) {
+ bigObj := testObject(smallSizeLimit * 2)
+
+ var wg sync.WaitGroup
+ for range concurrentPutCount {
+ wg.Add(1)
+ go func() {
+ testPut(t, blobStor, bigObj)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ testGet(t, blobStor, bigObj)
+ })
+
+ t.Run("put the same big object with error", func(t *testing.T) {
+ bigObj := testObject(smallSizeLimit * 2)
+
+ var wg sync.WaitGroup
+ for range concurrentPutCount + 1 {
+ wg.Add(1)
+ go func() {
+ testPutFileExistsError(t, blobStor, bigObj)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ testGet(t, blobStor, bigObj)
+ })
+
+ t.Run("put the same small object", func(t *testing.T) {
+ smallObj := testObject(smallSizeLimit / 2)
+
+ var wg sync.WaitGroup
+ for range concurrentPutCount {
+ wg.Add(1)
+ go func() {
+ testPut(t, blobStor, smallObj)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ testGet(t, blobStor, smallObj)
+ })
+}
+
+func TestConcurrentDelete(t *testing.T) {
+ dir := t.TempDir()
+
+ const smallSizeLimit = 512
+
+ blobStor := New(
+ WithStorages(defaultStorages(dir, smallSizeLimit)))
+ require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, blobStor.Init(context.Background()))
+
+ testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
+ var prm common.PutPrm
+ prm.Object = obj
+ _, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ }
+
+ testDelete := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
+ var prm common.DeletePrm
+ prm.Address = object.AddressOf(obj)
+ if _, err := b.Delete(context.Background(), prm); err != nil {
+ require.ErrorContains(t, err, "object not found")
+ }
+ }
+
+ testDeletedExists := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
+ var prm common.ExistsPrm
+ prm.Address = object.AddressOf(obj)
+ res, err := b.Exists(context.Background(), prm)
+ require.NoError(t, err)
+ require.False(t, res.Exists)
+ }
+
+ t.Run("delete the same big object", func(t *testing.T) {
+ bigObj := testObject(smallSizeLimit * 2)
+ testPut(t, blobStor, bigObj)
+
+ var wg sync.WaitGroup
+ for range 2 {
+ wg.Add(1)
+ go func() {
+ testDelete(t, blobStor, bigObj)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ testDeletedExists(t, blobStor, bigObj)
+ })
+
+ t.Run("delete the same small object", func(t *testing.T) {
+ smallObj := testObject(smallSizeLimit / 2)
+ testPut(t, blobStor, smallObj)
+
+ var wg sync.WaitGroup
+ for range 2 {
+ wg.Add(1)
+ go func() {
+ testDelete(t, blobStor, smallObj)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ testDeletedExists(t, blobStor, smallObj)
})
}
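
The three subtests above repeat the same fan-out boilerplate. As an illustration only, that pattern could be factored into a shared helper like the hypothetical sketch below (errgroup is already used by the fstree tests later in this diff):

```go
// runConcurrently fans fn out n times and returns the first error, if any.
func runConcurrently(n int, fn func() error) error {
	var eg errgroup.Group // golang.org/x/sync/errgroup
	for range n {
		eg.Go(fn)
	}
	return eg.Wait()
}
```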
diff --git a/pkg/local_object_storage/blobstor/common/delete.go b/pkg/local_object_storage/blobstor/common/delete.go
index 1b04eab1a1..c19e099cbd 100644
--- a/pkg/local_object_storage/blobstor/common/delete.go
+++ b/pkg/local_object_storage/blobstor/common/delete.go
@@ -8,6 +8,7 @@ import (
type DeletePrm struct {
Address oid.Address
StorageID []byte
+ Size uint64
}
// DeleteRes groups the resulting values of Delete operation.
diff --git a/pkg/local_object_storage/blobstor/common/iterate.go b/pkg/local_object_storage/blobstor/common/iterate.go
index a6f0da26ba..a1b8ff0472 100644
--- a/pkg/local_object_storage/blobstor/common/iterate.go
+++ b/pkg/local_object_storage/blobstor/common/iterate.go
@@ -15,9 +15,7 @@ type IterationHandler func(IterationElement) error
// IteratePrm groups the parameters of Iterate operation.
type IteratePrm struct {
Handler IterationHandler
- LazyHandler func(oid.Address, func() ([]byte, error)) error
IgnoreErrors bool
- ErrorHandler func(oid.Address, error) error
}
// IterateRes groups the resulting values of Iterate operation.
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
new file mode 100644
index 0000000000..788fe66f25
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -0,0 +1,38 @@
+package common
+
+import (
+ "context"
+
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type RebuildRes struct {
+ ObjectsMoved uint64
+ FilesRemoved uint64
+}
+
+type RebuildPrm struct {
+ MetaStorage MetaStorage
+ Limiter RebuildLimiter
+ FillPercent int
+}
+
+type MetaStorage interface {
+ UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
+}
+
+type ReleaseFunc func()
+
+type ConcurrencyLimiter interface {
+ AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error)
+}
+
+type RateLimiter interface {
+ ReadRequest(context.Context) (ReleaseFunc, error)
+ WriteRequest(context.Context) (ReleaseFunc, error)
+}
+
+type RebuildLimiter interface {
+ ConcurrencyLimiter
+ RateLimiter
+}
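
No RebuildLimiter implementation appears in this file, so as an illustration only: a semaphore-based limiter satisfying both embedded interfaces might look like the sketch below (all names are hypothetical; read and write requests share one budget purely for brevity).

```go
// semLimiter bounds concurrent rebuild work with a buffered channel.
type semLimiter struct{ sem chan struct{} }

func newSemLimiter(n int) *semLimiter { return &semLimiter{sem: make(chan struct{}, n)} }

func (l *semLimiter) acquire(ctx context.Context) (ReleaseFunc, error) {
	select {
	case l.sem <- struct{}{}:
		return func() { <-l.sem }, nil // release returns the slot
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func (l *semLimiter) AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) { return l.acquire(ctx) }
func (l *semLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error)     { return l.acquire(ctx) }
func (l *semLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error)    { return l.acquire(ctx) }

var _ RebuildLimiter = (*semLimiter)(nil)
```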
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index 76dd6d96eb..e35c35e602 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -1,25 +1,36 @@
package common
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
// Storage represents key-value object storage.
// It is used as a building block for a blobstor of a shard.
type Storage interface {
- Open(readOnly bool) error
+ Open(mode mode.ComponentMode) error
Init() error
- Close() error
+ Close(context.Context) error
Type() string
Path() string
- SetCompressor(cc *compression.Config)
+ ObjectsCount(ctx context.Context) (uint64, error)
+
+ SetCompressor(cc *compression.Compressor)
+ Compressor() *compression.Compressor
+
// SetReportErrorFunc allows the caller to provide a function to be called on disk errors.
// This function MUST be called before Open.
- SetReportErrorFunc(f func(string, error))
+ SetReportErrorFunc(f func(context.Context, string, error))
+ SetParentID(parentID string)
- Get(GetPrm) (GetRes, error)
- GetRange(GetRangePrm) (GetRangeRes, error)
- Exists(ExistsPrm) (ExistsRes, error)
- Put(PutPrm) (PutRes, error)
- Delete(DeletePrm) (DeleteRes, error)
- Iterate(IteratePrm) (IterateRes, error)
+ Get(context.Context, GetPrm) (GetRes, error)
+ GetRange(context.Context, GetRangePrm) (GetRangeRes, error)
+ Exists(context.Context, ExistsPrm) (ExistsRes, error)
+ Put(context.Context, PutPrm) (PutRes, error)
+ Delete(context.Context, DeletePrm) (DeleteRes, error)
+ Iterate(context.Context, IteratePrm) (IterateRes, error)
+ Rebuild(context.Context, RebuildPrm) (RebuildRes, error)
}
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 6e05366cfb..445a0494bc 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -3,13 +3,15 @@ package compression
import (
"crypto/rand"
"fmt"
+ "log"
"testing"
+ "github.com/klauspost/compress"
"github.com/stretchr/testify/require"
)
func BenchmarkCompression(b *testing.B) {
- c := Config{Enabled: true}
+ c := Compressor{Config: Config{Enabled: true}}
require.NoError(b, c.Init())
for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} {
@@ -31,10 +33,10 @@ func BenchmarkCompression(b *testing.B) {
}
}
-func benchWith(b *testing.B, c Config, data []byte) {
+func benchWith(b *testing.B, c Compressor, data []byte) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
_ = c.Compress(data)
}
}
@@ -47,3 +49,54 @@ func notSoRandomSlice(size, blockSize int) []byte {
}
return data
}
+
+func BenchmarkCompressionRealVSEstimate(b *testing.B) {
+ var total float64 // to prevent the compiler from optimizing the work away
+ maxSize := 60 * 1024 * 1024
+ b.Run("estimate", func(b *testing.B) {
+ b.ResetTimer()
+
+ c := &Compressor{
+ Config: Config{
+ Enabled: true,
+ },
+ }
+ require.NoError(b, c.Init())
+
+ for size := 1024; size <= maxSize; size *= 2 {
+ data := make([]byte, size)
+ _, err := rand.Reader.Read(data)
+ require.NoError(b, err)
+
+ b.StartTimer()
+ estimation := compress.Estimate(data)
+ total += estimation
+ b.StopTimer()
+ }
+ })
+
+ b.Run("compress", func(b *testing.B) {
+ b.ResetTimer()
+
+ c := &Compressor{
+ Config: Config{
+ Enabled: true,
+ },
+ }
+ require.NoError(b, c.Init())
+
+ for size := 1024; size <= maxSize; size *= 2 {
+ data := make([]byte, size)
+ _, err := rand.Reader.Read(data)
+ require.NoError(b, err)
+
+ b.StartTimer()
+ maxSize := c.encoder.MaxEncodedSize(len(data))
+ compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
+ total += float64(len(compressed)) / float64(len(data))
+ b.StopTimer()
+ }
+ })
+
+ log.Println(total)
+}
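
compress.Estimate returns a heuristic in [0, 1], where values near zero mean the data is likely incompressible; the benchmark above weighs the cost of that heuristic against a full zstd pass. A hedged restatement of the gate that Compressor.Compress applies when EstimateCompressibility is enabled (the threshold here is illustrative, not the configured one):

```go
// maybeCompress skips the expensive zstd pass for data that looks random.
func maybeCompress(c *Compressor, data []byte) []byte {
	if compress.Estimate(data) < 0.1 { // illustrative threshold
		return data
	}
	return c.Compress(data)
}
```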
diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go
index 79e37f8aef..c76cec9a10 100644
--- a/pkg/local_object_storage/blobstor/compression/compress.go
+++ b/pkg/local_object_storage/blobstor/compression/compress.go
@@ -4,17 +4,36 @@ import (
"bytes"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/klauspost/compress"
"github.com/klauspost/compress/zstd"
)
+type Level string
+
+const (
+ LevelDefault Level = ""
+ LevelOptimal Level = "optimal"
+ LevelFastest Level = "fastest"
+ LevelSmallestSize Level = "smallest_size"
+)
+
+type Compressor struct {
+ Config
+
+ encoder *zstd.Encoder
+ decoder *zstd.Decoder
+}
+
// Config represents common compression-related configuration.
type Config struct {
Enabled bool
UncompressableContentTypes []string
+ Level Level
- encoder *zstd.Encoder
- decoder *zstd.Decoder
+ EstimateCompressibility bool
+ EstimateCompressibilityThreshold float64
}
// zstdFrameMagic contains first 4 bytes of any compressed object
@@ -22,22 +41,18 @@ type Config struct {
var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
// Init initializes compression routines.
-func (c *Config) Init() error {
+func (c *Compressor) Init() error {
var err error
if c.Enabled {
- c.encoder, err = zstd.NewWriter(nil)
+ c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel()))
if err != nil {
return err
}
}
c.decoder, err = zstd.NewReader(nil)
- if err != nil {
- return err
- }
-
- return nil
+ return err
}
// NeedsCompression returns true if the object should be compressed.
@@ -73,7 +88,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool {
// Decompress decompresses data if it starts with the magic
// and returns data untouched otherwise.
-func (c *Config) Decompress(data []byte) ([]byte, error) {
+func (c *Compressor) Decompress(data []byte) ([]byte, error) {
if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) {
return data, nil
}
@@ -82,16 +97,31 @@ func (c *Config) Decompress(data []byte) ([]byte, error) {
// Compress compresses data if compression is enabled
// and returns data untouched otherwise.
-func (c *Config) Compress(data []byte) []byte {
+func (c *Compressor) Compress(data []byte) []byte {
if c == nil || !c.Enabled {
return data
}
+ if c.EstimateCompressibility {
+ estimated := compress.Estimate(data)
+ if estimated >= c.EstimateCompressibilityThreshold {
+ return c.compress(data)
+ }
+ return data
+ }
+ return c.compress(data)
+}
+
+func (c *Compressor) compress(data []byte) []byte {
maxSize := c.encoder.MaxEncodedSize(len(data))
- return c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
+ compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
+ if len(data) < len(compressed) {
+ return data
+ }
+ return compressed
}
// Close closes encoder and decoder, returns any error occurred.
-func (c *Config) Close() error {
+func (c *Compressor) Close() error {
var err error
if c.encoder != nil {
err = c.encoder.Close()
@@ -101,3 +131,24 @@ func (c *Config) Close() error {
}
return err
}
+
+func (c *Config) HasValidCompressionLevel() bool {
+ return c.Level == LevelDefault ||
+ c.Level == LevelOptimal ||
+ c.Level == LevelFastest ||
+ c.Level == LevelSmallestSize
+}
+
+func (c *Compressor) compressionLevel() zstd.EncoderLevel {
+ switch c.Level {
+ case LevelDefault, LevelOptimal:
+ return zstd.SpeedDefault
+ case LevelFastest:
+ return zstd.SpeedFastest
+ case LevelSmallestSize:
+ return zstd.SpeedBestCompression
+ default:
+ assert.Fail("unknown compression level", string(c.Level))
+ return zstd.SpeedDefault
+ }
+}
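
A minimal roundtrip sketch for the new Compressor type, using only names introduced in the diff above:

```go
func roundtrip(payload []byte) ([]byte, error) {
	c := &Compressor{Config: Config{Enabled: true, Level: LevelFastest}}
	if err := c.Init(); err != nil {
		return nil, err
	}
	defer c.Close()

	compressed := c.Compress(payload) // returns payload unchanged if zstd would grow it
	return c.Decompress(compressed)   // passes data through unless the zstd frame magic matches
}
```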
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 6ceb9cefae..0418eedd05 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -1,18 +1,41 @@
package blobstor
import (
+ "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"go.uber.org/zap"
)
// Open opens BlobStor.
-func (b *BlobStor) Open(readOnly bool) error {
- b.log.Debug("opening...")
+func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
+ b.log.Debug(ctx, logs.BlobstorOpening)
+ b.modeMtx.Lock()
+ defer b.modeMtx.Unlock()
+ b.mode = mode
+
+ err := b.openBlobStor(ctx, mode)
+ if err != nil {
+ return err
+ }
+ b.metrics.SetMode(mode.ReadOnly())
+
+ return nil
+}
+
+func (b *BlobStor) openBlobStor(ctx context.Context, mod mode.Mode) error {
for i := range b.storage {
- err := b.storage[i].Storage.Open(readOnly)
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ err := b.storage[i].Storage.Open(mode.ConvertToComponentMode(mod))
if err != nil {
return err
}
@@ -28,9 +51,13 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
// If BlobStor is already initialized, no action is taken.
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.
-func (b *BlobStor) Init() error {
- b.log.Debug("initializing...")
+func (b *BlobStor) Init(ctx context.Context) error {
+ b.log.Debug(ctx, logs.BlobstorInitializing)
+ if !b.compression.HasValidCompressionLevel() {
+ b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level)))
+ b.compression.Level = compression.LevelDefault
+ }
if err := b.compression.Init(); err != nil {
return err
}
@@ -45,14 +72,14 @@ func (b *BlobStor) Init() error {
}
// Close releases all internal resources of BlobStor.
-func (b *BlobStor) Close() error {
- b.log.Debug("closing...")
+func (b *BlobStor) Close(ctx context.Context) error {
+ b.log.Debug(ctx, logs.BlobstorClosing)
var firstErr error
for i := range b.storage {
- err := b.storage[i].Storage.Close()
+ err := b.storage[i].Storage.Close(ctx)
if err != nil {
- b.log.Info("couldn't close storage", zap.String("error", err.Error()))
+ b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
if firstErr == nil {
firstErr = err
}
@@ -64,5 +91,8 @@ func (b *BlobStor) Close() error {
if firstErr == nil {
firstErr = err
}
+ if firstErr == nil {
+ b.metrics.Close()
+ }
return firstErr
}
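
The new lifecycle, as exercised by the tests earlier in this diff, is Open → Init → Close, all context-aware. A sketch under the assumption that SubStorage is the slice element type accepted by WithStorages:

```go
func openBlobStor(ctx context.Context, storages []SubStorage) (*BlobStor, error) {
	b := New(WithStorages(storages))
	if err := b.Open(ctx, mode.ReadWrite); err != nil {
		return nil, err
	}
	// Init falls back to LevelDefault if the configured compression level is invalid.
	if err := b.Init(ctx); err != nil {
		_ = b.Close(ctx)
		return nil, err
	}
	return b, nil
}
```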
diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go
index 8c5a7aba62..86d8f15e34 100644
--- a/pkg/local_object_storage/blobstor/delete.go
+++ b/pkg/local_object_storage/blobstor/delete.go
@@ -1,26 +1,50 @@
package blobstor
import (
- "errors"
+ "context"
+ "encoding/hex"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
-func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
+func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ b.metrics.Delete(time.Since(startedAt), success, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Delete",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
- res, err := b.storage[i].Storage.Delete(prm)
- if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
+ res, err := b.storage[i].Storage.Delete(ctx, prm)
+ if err == nil || !client.IsErrObjectNotFound(err) {
if err == nil {
- logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
+ success = true
+ logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
}
return res, err
}
}
+ return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
var st common.Storage
@@ -31,9 +55,10 @@ func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
st = b.storage[0].Storage
}
- res, err := st.Delete(prm)
+ res, err := st.Delete(ctx, prm)
if err == nil {
- logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
+ success = true
+ logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}
return res, err
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index 7a5a009577..c155e15b82 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -1,7 +1,15 @@
package blobstor
import (
+ "context"
+ "encoding/hex"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -9,15 +17,34 @@ import (
//
// Returns any error encountered that did not allow
// to completely check object existence.
-func (b *BlobStor) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
+func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
+ var (
+ exists = false
+ startedAt = time.Now()
+ )
+ defer func() {
+ b.metrics.Exists(time.Since(startedAt), exists, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Exists",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID != nil {
if len(prm.StorageID) == 0 {
- return b.storage[len(b.storage)-1].Storage.Exists(prm)
+ res, err := b.storage[len(b.storage)-1].Storage.Exists(ctx, prm)
+ exists = err == nil && res.Exists
+ return res, err
}
- return b.storage[0].Storage.Exists(prm)
+ res, err := b.storage[0].Storage.Exists(ctx, prm)
+ exists = err == nil && res.Exists
+ return res, err
}
// If there was an error during existence check below,
@@ -31,8 +58,9 @@ func (b *BlobStor) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
// error | error | log the first error, return the second
var errors []error
for i := range b.storage {
- res, err := b.storage[i].Storage.Exists(prm)
+ res, err := b.storage[i].Storage.Exists(ctx, prm)
if err == nil && res.Exists {
+ exists = true
return res, nil
} else if err != nil {
errors = append(errors, err)
@@ -44,9 +72,9 @@ func (b *BlobStor) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
}
for _, err := range errors[:len(errors)-1] {
- b.log.Warn("error occurred during object existence checking",
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
- zap.String("error", err.Error()))
+ zap.Error(err))
}
return common.ExistsRes{}, errors[len(errors)-1]
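
The StorageID convention driving Exists (and Delete above) is easy to miss: nil means the substorage is unknown and all of them are probed; an empty non-nil ID selects the last substorage; any other value selects the first. A hypothetical restatement of that dispatch, assuming the configured order [blobovnicza tree, ..., fstree]:

```go
// selectByStorageID mirrors the dispatch rule used by Exists and Delete.
func (b *BlobStor) selectByStorageID(id []byte) (common.Storage, bool) {
	if id == nil {
		return nil, false // probe every substorage
	}
	if len(id) == 0 {
		return b.storage[len(b.storage)-1].Storage, true
	}
	return b.storage[0].Storage, true
}
```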
diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go
index 54f3eb1f70..7eb7d49bf4 100644
--- a/pkg/local_object_storage/blobstor/exists_test.go
+++ b/pkg/local_object_storage/blobstor/exists_test.go
@@ -1,12 +1,13 @@
package blobstor
import (
- "os"
- "path/filepath"
+ "context"
"testing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -14,16 +15,14 @@ import (
)
func TestExists(t *testing.T) {
- dir, err := os.MkdirTemp("", "frostfs*")
- require.NoError(t, err)
- t.Cleanup(func() { _ = os.RemoveAll(dir) })
-
const smallSizeLimit = 512
- b := New(
- WithStorages(defaultStorages(dir, smallSizeLimit)))
- require.NoError(t, b.Open(false))
- require.NoError(t, b.Init())
+ storages, _, largeFileStorage := defaultTestStorages(t.TempDir(), smallSizeLimit)
+
+ b := New(WithStorages(storages))
+
+ require.NoError(t, b.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, b.Init(context.Background()))
objects := []*objectSDK.Object{
testObject(smallSizeLimit / 2),
@@ -33,7 +32,7 @@ func TestExists(t *testing.T) {
for i := range objects {
var prm common.PutPrm
prm.Object = objects[i]
- _, err = b.Put(prm)
+ _, err := b.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -41,41 +40,32 @@ func TestExists(t *testing.T) {
for i := range objects {
prm.Address = objectCore.AddressOf(objects[i])
- res, err := b.Exists(prm)
+ res, err := b.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
}
prm.Address = oidtest.Address()
- res, err := b.Exists(prm)
+ res, err := b.Exists(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Exists)
- t.Run("corrupt direcrory", func(t *testing.T) {
- var bigDir string
- de, err := os.ReadDir(dir)
- require.NoError(t, err)
- for i := range de {
- if de[i].Name() != blobovniczaDir {
- bigDir = filepath.Join(dir, de[i].Name())
- break
- }
- }
- require.NotEmpty(t, bigDir)
-
- require.NoError(t, os.Chmod(dir, 0))
- t.Cleanup(func() { require.NoError(t, os.Chmod(dir, 0777)) })
+ t.Run("corrupt directory", func(t *testing.T) {
+ largeFileStorage.SetOption(teststore.WithExists(func(common.ExistsPrm) (common.ExistsRes, error) {
+ return common.ExistsRes{}, teststore.ErrDiskExploded
+ }))
// Object exists, first error is logged.
prm.Address = objectCore.AddressOf(objects[0])
- res, err := b.Exists(prm)
+ res, err := b.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
// Object doesn't exist, first error is returned.
prm.Address = objectCore.AddressOf(objects[1])
- _, err = b.Exists(prm)
+ _, err = b.Exists(context.Background(), prm)
require.Error(t, err)
+ require.ErrorIs(t, err, teststore.ErrDiskExploded)
})
}
diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go
index 1ff74893d3..2544729f73 100644
--- a/pkg/local_object_storage/blobstor/fstree/control.go
+++ b/pkg/local_object_storage/blobstor/fstree/control.go
@@ -1,19 +1,36 @@
package fstree
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)
// Open implements common.Storage.
-func (t *FSTree) Open(ro bool) error {
- t.readOnly = ro
+func (t *FSTree) Open(mode mode.ComponentMode) error {
+ t.readOnly = mode.ReadOnly()
+ t.metrics.SetMode(mode)
return nil
}
// Init implements common.Storage.
func (t *FSTree) Init() error {
- return util.MkdirAllX(t.RootPath, t.Permissions)
+ if err := util.MkdirAllX(t.RootPath, t.Permissions); err != nil {
+ return err
+ }
+ if !t.readOnly {
+ f := newSpecificWriteData(t.fileCounter, t.RootPath, t.Permissions, t.noSync)
+ if f != nil {
+ t.writer = f
+ }
+ }
+
+ return t.initFileCounter()
}
// Close implements common.Storage.
-func (*FSTree) Close() error { return nil }
+func (t *FSTree) Close(_ context.Context) error {
+ t.metrics.Close()
+ return nil
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
new file mode 100644
index 0000000000..3caee7ee16
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -0,0 +1,69 @@
+package fstree
+
+import (
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+)
+
+// FileCounter is used to count files in FSTree. The implementation must be thread-safe.
+type FileCounter interface {
+ Set(count, size uint64)
+ Inc(size uint64)
+ Dec(size uint64)
+}
+
+type noopCounter struct{}
+
+func (c *noopCounter) Set(uint64, uint64) {}
+func (c *noopCounter) Inc(uint64) {}
+func (c *noopCounter) Dec(uint64) {}
+
+func counterEnabled(c FileCounter) bool {
+ _, noop := c.(*noopCounter)
+ return !noop
+}
+
+type SimpleCounter struct {
+ mtx sync.RWMutex
+ count uint64
+ size uint64
+}
+
+func NewSimpleCounter() *SimpleCounter {
+ return &SimpleCounter{}
+}
+
+func (c *SimpleCounter) Set(count, size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ c.count = count
+ c.size = size
+}
+
+func (c *SimpleCounter) Inc(size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ c.count++
+ c.size += size
+}
+
+func (c *SimpleCounter) Dec(size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ assert.True(c.count > 0, "fstree.SimpleCounter: invalid count")
+ c.count--
+
+ assert.True(c.size >= size, "fstree.SimpleCounter: invalid size")
+ c.size -= size
+}
+
+func (c *SimpleCounter) CountSize() (uint64, uint64) {
+ c.mtx.RLock()
+ defer c.mtx.RUnlock()
+
+ return c.count, c.size
+}
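
A package-internal sketch of the invariant the counter is meant to uphold, the same one TestObjectCounter asserts later in this diff: the live counter must match a fresh walk of the tree (helper name is hypothetical).

```go
func verifyCounter(t *FSTree, c *SimpleCounter) error {
	count, size := c.CountSize()
	realCount, realSize, err := t.countFiles()
	if err != nil {
		return err
	}
	if count != realCount || size != realSize {
		return fmt.Errorf("counter drift: have (%d, %d), want (%d, %d)",
			count, size, realCount, realSize)
	}
	return nil
}
```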
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 3265e68f3b..112741ab41 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -1,6 +1,7 @@
package fstree
import (
+ "context"
"crypto/sha256"
"errors"
"fmt"
@@ -10,27 +11,51 @@ import (
"strconv"
"strings"
"syscall"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
+type keyLock interface {
+ Lock(string)
+ Unlock(string)
+}
+
+type noopKeyLock struct{}
+
+func (l *noopKeyLock) Lock(string) {}
+func (l *noopKeyLock) Unlock(string) {}
+
// FSTree represents an object storage as a filesystem tree.
type FSTree struct {
Info
- *compression.Config
+ log *logger.Logger
+
+ compressor *compression.Compressor
Depth uint64
DirNameLen int
noSync bool
readOnly bool
+ metrics Metrics
+
+ fileCounter FileCounter
+
+ writer writer
}
// Info groups the information about file storage.
@@ -54,16 +79,20 @@ var _ common.Storage = (*FSTree)(nil)
func New(opts ...Option) *FSTree {
f := &FSTree{
Info: Info{
- Permissions: 0700,
+ Permissions: 0o700,
RootPath: "./",
},
- Config: nil,
- Depth: 4,
- DirNameLen: DirNameLen,
+ compressor: nil,
+ Depth: 4,
+ DirNameLen: DirNameLen,
+ metrics: &noopMetrics{},
+ fileCounter: &noopCounter{},
+ log: logger.NewLoggerWrapper(zap.L()),
}
for i := range opts {
opts[i](f)
}
+ f.writer = newGenericWriteData(f.fileCounter, f.Permissions, f.noSync)
return f
}
@@ -96,15 +125,36 @@ func addressFromString(s string) (oid.Address, error) {
}
// Iterate iterates over all stored objects.
-func (t *FSTree) Iterate(prm common.IteratePrm) (common.IterateRes, error) {
- return common.IterateRes{}, t.iterate(0, []string{t.RootPath}, prm)
+func (t *FSTree) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) {
+ var (
+ err error
+ startedAt = time.Now()
+ )
+
+ defer func() {
+ t.metrics.Iterate(time.Since(startedAt), err == nil)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Iterate",
+ trace.WithAttributes(
+ attribute.String("path", t.RootPath),
+ attribute.Bool("ignore_errors", prm.IgnoreErrors),
+ ))
+ defer span.End()
+
+ err = t.iterate(ctx, 0, []string{t.RootPath}, prm)
+ return common.IterateRes{}, err
}
-func (t *FSTree) iterate(depth uint64, curPath []string, prm common.IteratePrm) error {
+func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, prm common.IteratePrm) error {
curName := strings.Join(curPath[1:], "")
- des, err := os.ReadDir(filepath.Join(curPath...))
+ dirPath := filepath.Join(curPath...)
+ des, err := os.ReadDir(dirPath)
if err != nil {
if prm.IgnoreErrors {
+ t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.Error(err),
+ zap.String("directory_path", dirPath))
return nil
}
return err
@@ -115,10 +165,15 @@ func (t *FSTree) iterate(depth uint64, curPath []string, prm common.IteratePrm)
curPath = append(curPath, "")
for i := range des {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
curPath[l] = des[i].Name()
if !isLast && des[i].IsDir() {
- err := t.iterate(depth+1, curPath, prm)
+ err := t.iterate(ctx, depth+1, curPath, prm)
if err != nil {
// Must be error from handler in case errors are ignored.
// Need to report.
@@ -134,34 +189,106 @@ func (t *FSTree) iterate(depth uint64, curPath []string, prm common.IteratePrm)
if err != nil {
continue
}
-
- if prm.LazyHandler != nil {
- err = prm.LazyHandler(addr, func() ([]byte, error) {
- return os.ReadFile(filepath.Join(curPath...))
- })
- } else {
- var data []byte
- data, err = os.ReadFile(filepath.Join(curPath...))
- if err == nil {
- data, err = t.Decompress(data)
- }
- if err != nil {
- if prm.IgnoreErrors {
- if prm.ErrorHandler != nil {
- return prm.ErrorHandler(addr, err)
- }
- continue
- }
- return err
- }
-
- err = prm.Handler(common.IterationElement{
- Address: addr,
- ObjectData: data,
- StorageID: []byte{},
- })
+ path := filepath.Join(curPath...)
+ data, err := os.ReadFile(path)
+ if err != nil && os.IsNotExist(err) {
+ continue
}
+ if err == nil {
+ data, err = t.compressor.Decompress(data)
+ }
+ if err != nil {
+ if prm.IgnoreErrors {
+ t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.Stringer("address", addr),
+ zap.Error(err),
+ zap.String("path", path))
+ continue
+ }
+ return err
+ }
+
+ err = prm.Handler(common.IterationElement{
+ Address: addr,
+ ObjectData: data,
+ StorageID: []byte{},
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type ObjectInfo struct {
+ Address oid.Address
+ DataSize uint64
+}
+type IterateInfoHandler func(ObjectInfo) error
+
+func (t *FSTree) IterateInfo(ctx context.Context, handler IterateInfoHandler) error {
+ var (
+ err error
+ startedAt = time.Now()
+ )
+ defer func() {
+ t.metrics.IterateInfo(time.Since(startedAt), err == nil)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.IterateInfo")
+ defer span.End()
+
+ return t.iterateInfo(ctx, 0, []string{t.RootPath}, handler)
+}
+
+func (t *FSTree) iterateInfo(ctx context.Context, depth uint64, curPath []string, handler IterateInfoHandler) error {
+ curName := strings.Join(curPath[1:], "")
+ dirPath := filepath.Join(curPath...)
+ entries, err := os.ReadDir(dirPath)
+ if err != nil {
+ return fmt.Errorf("read fstree dir '%s': %w", dirPath, err)
+ }
+
+ isLast := depth >= t.Depth
+ l := len(curPath)
+ curPath = append(curPath, "")
+
+ for i := range entries {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ curPath[l] = entries[i].Name()
+
+ if !isLast && entries[i].IsDir() {
+ err := t.iterateInfo(ctx, depth+1, curPath, handler)
+ if err != nil {
+ return err
+ }
+ }
+
+ if depth != t.Depth {
+ continue
+ }
+
+ addr, err := addressFromString(curName + entries[i].Name())
+ if err != nil {
+ continue
+ }
+ info, err := entries[i].Info()
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return err
+ }
+
+ err = handler(ObjectInfo{
+ Address: addr,
+ DataSize: uint64(info.Size()),
+ })
if err != nil {
return err
}
@@ -189,23 +316,50 @@ func (t *FSTree) treePath(addr oid.Address) string {
}
// Delete removes the object with the specified address from the storage.
-func (t *FSTree) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
+func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
+ var (
+ err error
+ startedAt = time.Now()
+ )
+ defer func() {
+ t.metrics.Delete(time.Since(startedAt), err == nil)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Delete",
+ trace.WithAttributes(
+ attribute.String("path", t.RootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ ))
+ defer span.End()
+
if t.readOnly {
- return common.DeleteRes{}, common.ErrReadOnly
+ err = common.ErrReadOnly
+ return common.DeleteRes{}, err
}
p := t.treePath(prm.Address)
-
- err := os.Remove(p)
- if err != nil && os.IsNotExist(err) {
- err = logicerr.Wrap(apistatus.ObjectNotFound{})
- }
+ err = t.writer.removeFile(p, prm.Size)
return common.DeleteRes{}, err
}
// Exists returns the path to the file with object contents if it exists in the storage
// and an error otherwise.
-func (t *FSTree) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
+func (t *FSTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
+ var (
+ success = false
+ startedAt = time.Now()
+ )
+ defer func() {
+ t.metrics.Exists(time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Exists",
+ trace.WithAttributes(
+ attribute.String("path", t.RootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ ))
+ defer span.End()
+
p := t.treePath(prm.Address)
_, err := os.Stat(p)
@@ -213,151 +367,122 @@ func (t *FSTree) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
if os.IsNotExist(err) {
err = nil
}
+ success = err == nil
return common.ExistsRes{Exists: found}, err
}
// Put puts an object in the storage.
-func (t *FSTree) Put(prm common.PutPrm) (common.PutRes, error) {
+func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ var (
+ size int
+ startedAt = time.Now()
+ err error
+ )
+ defer func() {
+ t.metrics.Put(time.Since(startedAt), size, err == nil)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Put",
+ trace.WithAttributes(
+ attribute.String("path", t.RootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
if t.readOnly {
- return common.PutRes{}, common.ErrReadOnly
+ err = common.ErrReadOnly
+ return common.PutRes{}, err
}
p := t.treePath(prm.Address)
- if err := util.MkdirAllX(filepath.Dir(p), t.Permissions); err != nil {
+ if err = util.MkdirAllX(filepath.Dir(p), t.Permissions); err != nil {
+ if errors.Is(err, syscall.ENOSPC) {
+ err = common.ErrNoSpace
+ return common.PutRes{}, err
+ }
return common.PutRes{}, err
}
if !prm.DontCompress {
- prm.RawData = t.Compress(prm.RawData)
+ prm.RawData = t.compressor.Compress(prm.RawData)
}
- // Here is a situation:
- // Feb 09 13:10:37 buky neofs-node[32445]: 2023-02-09T13:10:37.161Z info log/log.go:13 local object storage operation {"shard_id": "SkT8BfjouW6t93oLuzQ79s", "address": "7NxFz4SruSi8TqXacr2Ae22nekMhgYk1sfkddJo9PpWk/5enyUJGCyU1sfrURDnHEjZFdbGqANVhayYGfdSqtA6wA", "op": "PUT", "type": "fstree", "storage_id": ""}
- // Feb 09 13:10:37 buky neofs-node[32445]: 2023-02-09T13:10:37.183Z info log/log.go:13 local object storage operation {"shard_id": "SkT8BfjouW6t93oLuzQ79s", "address": "7NxFz4SruSi8TqXacr2Ae22nekMhgYk1sfkddJo9PpWk/5enyUJGCyU1sfrURDnHEjZFdbGqANVhayYGfdSqtA6wA", "op": "metabase PUT"}
- // Feb 09 13:10:37 buky neofs-node[32445]: 2023-02-09T13:10:37.862Z debug policer/check.go:231 shortage of object copies detected {"component": "Object Policer", "object": "7NxFz4SruSi8TqXacr2Ae22nekMhgYk1sfkddJo9PpWk/5enyUJGCyU1sfrURDnHEjZFdbGqANVhayYGfdSqtA6wA", "shortage": 1}
- // Feb 09 13:10:37 buky neofs-node[32445]: 2023-02-09T13:10:37.862Z debug shard/get.go:124 object is missing in write-cache {"shard_id": "SkT8BfjouW6t93oLuzQ79s", "addr": "7NxFz4SruSi8TqXacr2Ae22nekMhgYk1sfkddJo9PpWk/5enyUJGCyU1sfrURDnHEjZFdbGqANVhayYGfdSqtA6wA", "skip_meta": false}
- //
- // 1. We put an object on node 1.
- // 2. Relentless policer sees that it has only 1 copy and tries to PUT it to node 2.
- // 3. PUT operation started by client at (1) also puts an object here.
- // 4. Now we have concurrent writes and one of `Rename` calls will return `no such file` error.
- // Even more than that, concurrent writes can corrupt data.
- //
- // So here is a solution:
- // 1. Write a file to 'name + 1'.
- // 2. If it exists, retry with temporary name being 'name + 2'.
- // 3. Set some reasonable number of attempts.
- //
- // It is a bit kludgey, but I am unusually proud about having found this out after
- // hours of research on linux kernel, dirsync mount option and ext4 FS, turned out
- // to be so hecking simple.
- // In a very rare situation we can have multiple partially written copies on disk,
- // this will be fixed in another issue (we should remove garbage on start).
- const retryCount = 5
- for i := 0; i < retryCount; i++ {
- tmpPath := p + "#" + strconv.FormatUint(uint64(i), 10)
- err := t.writeAndRename(tmpPath, p, prm.RawData)
- if err != syscall.EEXIST || i == retryCount-1 {
- return common.PutRes{StorageID: []byte{}}, err
- }
- }
-
- // unreachable, but precaution never hurts, especially 1 day before release.
- return common.PutRes{StorageID: []byte{}}, fmt.Errorf("couldn't read file after %d retries", retryCount)
-}
-
-// writeAndRename opens tmpPath exclusively, writes data to it and renames it to p.
-func (t *FSTree) writeAndRename(tmpPath, p string, data []byte) error {
- err := t.writeFile(tmpPath, data)
- if err != nil {
- var pe *fs.PathError
- if errors.As(err, &pe) {
- switch pe.Err {
- case syscall.ENOSPC:
- err = common.ErrNoSpace
- _ = os.RemoveAll(tmpPath)
- case syscall.EEXIST:
- return syscall.EEXIST
- }
- }
- } else {
- err = os.Rename(tmpPath, p)
- }
- return err
-}
-
-func (t *FSTree) writeFlags() int {
- flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC | os.O_EXCL
- if t.noSync {
- return flags
- }
- return flags | os.O_SYNC
-}
-
-// writeFile writes data to a file with path p.
-// The code is copied from `os.WriteFile` with minor corrections for flags.
-func (t *FSTree) writeFile(p string, data []byte) error {
- f, err := os.OpenFile(p, t.writeFlags(), t.Permissions)
- if err != nil {
- return err
- }
- _, err = f.Write(data)
- if err1 := f.Close(); err1 != nil && err == nil {
- err = err1
- }
- return err
-}
-
-// PutStream puts executes handler on a file opened for write.
-func (t *FSTree) PutStream(addr oid.Address, handler func(*os.File) error) error {
- if t.readOnly {
- return common.ErrReadOnly
- }
-
- p := t.treePath(addr)
-
- if err := util.MkdirAllX(filepath.Dir(p), t.Permissions); err != nil {
- return err
- }
-
- f, err := os.OpenFile(p, t.writeFlags(), t.Permissions)
- if err != nil {
- return err
- }
- defer f.Close()
-
- return handler(f)
+ size = len(prm.RawData)
+ return common.PutRes{StorageID: []byte{}}, t.writer.writeData(p, prm.RawData)
}
// Get returns an object from the storage by address.
-func (t *FSTree) Get(prm common.GetPrm) (common.GetRes, error) {
+func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ size = 0
+ )
+ defer func() {
+ t.metrics.Get(time.Since(startedAt), size, success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.Get",
+ trace.WithAttributes(
+ attribute.String("path", t.RootPath),
+ attribute.Bool("raw", prm.Raw),
+ attribute.String("address", prm.Address.EncodeToString()),
+ ))
+ defer span.End()
+
p := t.treePath(prm.Address)
- if _, err := os.Stat(p); os.IsNotExist(err) {
- return common.GetRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
+ var data []byte
+ var err error
+ {
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Get.ReadFile")
+ defer span.End()
+
+ data, err = os.ReadFile(p)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return common.GetRes{}, err
+ }
}
- data, err := os.ReadFile(p)
- if err != nil {
- return common.GetRes{}, err
- }
-
- data, err = t.Decompress(data)
+ data, err = t.compressor.Decompress(data)
if err != nil {
return common.GetRes{}, err
}
+ size = len(data)
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
return common.GetRes{}, err
}
-
- return common.GetRes{Object: obj, RawData: data}, err
+ success = true
+ return common.GetRes{Object: obj, RawData: data}, nil
}
// GetRange implements common.Storage.
-func (t *FSTree) GetRange(prm common.GetRangePrm) (common.GetRangeRes, error) {
- res, err := t.Get(common.GetPrm{Address: prm.Address})
+func (t *FSTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ size = 0
+ )
+ defer func() {
+ t.metrics.GetRange(time.Since(startedAt), size, success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.GetRange",
+ trace.WithAttributes(
+ attribute.String("path", t.RootPath),
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)),
+ attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)),
+ ))
+ defer span.End()
+
+ res, err := t.Get(ctx, common.GetPrm{Address: prm.Address})
if err != nil {
return common.GetRangeRes{}, err
}
@@ -367,35 +492,95 @@ func (t *FSTree) GetRange(prm common.GetRangePrm) (common.GetRangeRes, error) {
to := from + prm.Range.GetLength()
if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
- return common.GetRangeRes{}, logicerr.Wrap(apistatus.ObjectOutOfRange{})
+ return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange))
}
+ success = true
+ data := payload[from:to]
+ size = len(data)
return common.GetRangeRes{
- Data: payload[from:to],
+ Data: data,
}, nil
}
-// NumberOfObjects walks the file tree rooted at FSTree's root
-// and returns number of stored objects.
-func (t *FSTree) NumberOfObjects() (uint64, error) {
- var counter uint64
+// initFileCounter walks the file tree rooted at FSTree's root,
+// counts the stored files and their total size, and initializes the file counter.
+func (t *FSTree) initFileCounter() error {
+ if !counterEnabled(t.fileCounter) {
+ return nil
+ }
+ count, size, err := t.countFiles()
+ if err != nil {
+ return err
+ }
+ t.fileCounter.Set(count, size)
+ return nil
+}
+
+func (t *FSTree) countFiles() (uint64, uint64, error) {
+ var count, size uint64
// it is simpler to just consider every file
// that is not a directory as an object
err := filepath.WalkDir(t.RootPath,
func(_ string, d fs.DirEntry, _ error) error {
+ if d.IsDir() {
+ return nil
+ }
+ count++
+ info, err := d.Info()
+ if err != nil {
+ return err
+ }
+ size += uint64(info.Size())
+
+ return nil
+ },
+ )
+ if err != nil {
+ return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
+ }
+
+ return count, size, nil
+}
+
+func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.ObjectsCount(time.Since(startedAt), success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.ObjectsCount",
+ trace.WithAttributes(
+ attribute.String("path", t.RootPath),
+ ))
+ defer span.End()
+
+ var result uint64
+
+ err := filepath.WalkDir(t.RootPath,
+ func(_ string, d fs.DirEntry, _ error) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
if !d.IsDir() {
- counter++
+ result++
}
return nil
},
)
if err != nil {
- return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+ return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
}
-
- return counter, nil
+ success = true
+ return result, nil
}
// Type is fstree storage type used in logs and configuration.
@@ -412,11 +597,23 @@ func (t *FSTree) Path() string {
}
// SetCompressor implements common.Storage.
-func (t *FSTree) SetCompressor(cc *compression.Config) {
- t.Config = cc
+func (t *FSTree) SetCompressor(cc *compression.Compressor) {
+ t.compressor = cc
+}
+
+func (t *FSTree) Compressor() *compression.Compressor {
+ return t.compressor
}
// SetReportErrorFunc implements common.Storage.
-func (t *FSTree) SetReportErrorFunc(f func(string, error)) {
+func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) {
// Do nothing, FSTree can encounter only one error which is returned.
}
+
+func (t *FSTree) SetParentID(parentID string) {
+ t.metrics.SetParentID(parentID)
+}
+
+func (t *FSTree) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) {
+ return common.RebuildRes{}, nil
+}
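
IterateInfo differs from Iterate in that it only stats directory entries instead of reading and decompressing every file, so it is the cheap way to walk object metadata. A usage sketch:

```go
// totalSize sums stored object sizes without touching file contents.
func totalSize(ctx context.Context, t *FSTree) (uint64, error) {
	var total uint64
	err := t.IterateInfo(ctx, func(oi ObjectInfo) error {
		total += oi.DataSize
		return nil
	})
	return total, err
}
```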
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index 0e5525e77e..50dae46a74 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -1,10 +1,17 @@
package fstree
import (
+ "context"
+ "errors"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
)
func TestAddressToString(t *testing.T) {
@@ -21,10 +28,110 @@ func Benchmark_addressFromString(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
_, err := addressFromString(s)
if err != nil {
b.Fatalf("benchmark error: %v", err)
}
}
}
+
+func TestObjectCounter(t *testing.T) {
+ t.Parallel()
+ counter := NewSimpleCounter()
+ fst := New(
+ WithPath(t.TempDir()),
+ WithDepth(2),
+ WithDirNameLen(2),
+ WithFileCounter(counter))
+ require.NoError(t, fst.Open(mode.ComponentReadWrite))
+ require.NoError(t, fst.Init())
+
+ count, size := counter.CountSize()
+ require.Equal(t, uint64(0), count)
+ require.Equal(t, uint64(0), size)
+
+ defer func() {
+ require.NoError(t, fst.Close(context.Background()))
+ }()
+
+ addr := oidtest.Address()
+ obj := objectSDK.New()
+ obj.SetID(addr.Object())
+ obj.SetContainerID(addr.Container())
+ obj.SetPayload([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0})
+
+ var putPrm common.PutPrm
+ putPrm.Address = addr
+ putPrm.RawData, _ = obj.Marshal()
+
+ var delPrm common.DeletePrm
+ delPrm.Address = addr
+
+ t.Run("without size hint", func(t *testing.T) {
+ eg, egCtx := errgroup.WithContext(context.Background())
+
+ eg.Go(func() error {
+ for range 1_000 {
+ _, err := fst.Put(egCtx, putPrm)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ eg.Go(func() error {
+ var le logicerr.Logical
+ for range 1_000 {
+ _, err := fst.Delete(egCtx, delPrm)
+ if err != nil && !errors.As(err, &le) {
+ return err
+ }
+ }
+ return nil
+ })
+
+ require.NoError(t, eg.Wait())
+
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
+ require.NoError(t, err)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
+ })
+
+ t.Run("with size hint", func(t *testing.T) {
+ delPrm.Size = uint64(len(putPrm.RawData))
+ eg, egCtx := errgroup.WithContext(context.Background())
+
+ eg.Go(func() error {
+ for range 1_000 {
+ _, err := fst.Put(egCtx, putPrm)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ eg.Go(func() error {
+ var le logicerr.Logical
+ for range 1_000 {
+ _, err := fst.Delete(egCtx, delPrm)
+ if err != nil && !errors.As(err, &le) {
+ return err
+ }
+ }
+ return nil
+ })
+
+ require.NoError(t, eg.Wait())
+
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
+ require.NoError(t, err)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
+ })
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
new file mode 100644
index 0000000000..6d633dad66
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -0,0 +1,138 @@
+package fstree
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+ "strconv"
+ "sync/atomic"
+ "syscall"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+type writer interface {
+ writeData(string, []byte) error
+ removeFile(string, uint64) error
+}
+
+type genericWriter struct {
+ perm fs.FileMode
+ flags int
+
+ fileGuard keyLock
+ fileCounter FileCounter
+ fileCounterEnabled bool
+ suffix atomic.Uint64
+}
+
+func newGenericWriteData(c FileCounter, perm fs.FileMode, noSync bool) writer {
+ flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC | os.O_EXCL
+ if !noSync {
+ flags |= os.O_SYNC
+ }
+
+ var fileGuard keyLock = &noopKeyLock{}
+ fileCounterEnabled := counterEnabled(c)
+ if fileCounterEnabled {
+ fileGuard = utilSync.NewKeyLocker[string]()
+ }
+
+ w := &genericWriter{
+ perm: perm,
+ flags: flags,
+
+ fileCounterEnabled: fileCounterEnabled,
+ fileGuard: fileGuard,
+ fileCounter: c,
+ }
+ return w
+}
+
+func (w *genericWriter) writeData(p string, data []byte) error {
+ tmpPath := p + "#" + strconv.FormatUint(w.suffix.Add(1), 10)
+ return w.writeAndRename(tmpPath, p, data)
+}
+
+// writeAndRename opens tmpPath exclusively, writes data to it and renames it to p.
+func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
+ if w.fileCounterEnabled {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+ }
+
+ err := w.writeFile(tmpPath, data)
+ if err != nil {
+ var pe *fs.PathError
+ if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) {
+ err = common.ErrNoSpace
+ _ = os.RemoveAll(tmpPath)
+ }
+ return err
+ }
+
+ if w.fileCounterEnabled {
+ w.fileCounter.Inc(uint64(len(data)))
+ var targetFileExists bool
+ if _, e := os.Stat(p); e == nil {
+ targetFileExists = true
+ }
+ err = os.Rename(tmpPath, p)
+ if err == nil && targetFileExists {
+ w.fileCounter.Dec(uint64(len(data)))
+ }
+ } else {
+ err = os.Rename(tmpPath, p)
+ }
+ return err
+}
+
+// writeFile writes data to a file with path p.
+// The code is copied from `os.WriteFile` with minor corrections for flags.
+func (w *genericWriter) writeFile(p string, data []byte) error {
+ f, err := os.OpenFile(p, w.flags, w.perm)
+ if err != nil {
+ return err
+ }
+ _, err = f.Write(data)
+ if err1 := f.Close(); err1 != nil && err == nil {
+ err = err1
+ }
+ return err
+}
+
+func (w *genericWriter) removeFile(p string, size uint64) error {
+ var err error
+ if w.fileCounterEnabled {
+ err = w.removeWithCounter(p, size)
+ } else {
+ err = os.Remove(p)
+ }
+
+ if err != nil && os.IsNotExist(err) {
+ err = logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return err
+}
+
+func (w *genericWriter) removeWithCounter(p string, size uint64) error {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+
+ if size == 0 {
+ stat, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+ size = uint64(stat.Size())
+ }
+
+ if err := os.Remove(p); err != nil {
+ return err
+ }
+ w.fileCounter.Dec(size)
+ return nil
+}
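
writeAndRename and removeWithCounter keep the counter consistent by serializing every operation on the same destination path through the key lock, while operations on different paths proceed in parallel. A minimal illustration of that pattern (KeyLocker as imported above; the helper is hypothetical):

```go
var pathGuard = utilSync.NewKeyLocker[string]()

// withPathLock runs work while holding the lock for path, so writers and
// removers of the same object never interleave their counter updates.
func withPathLock(path string, work func() error) error {
	pathGuard.Lock(path)
	defer pathGuard.Unlock(path)
	return work()
}
```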
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
new file mode 100644
index 0000000000..49cbda344b
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -0,0 +1,137 @@
+//go:build linux && !fstree_generic
+
+package fstree
+
+import (
+ "errors"
+ "io/fs"
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "golang.org/x/sys/unix"
+)
+
+type linuxWriter struct {
+ root string
+ perm uint32
+ flags int
+
+ fileGuard keyLock
+ fileCounter FileCounter
+ fileCounterEnabled bool
+}
+
+func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync bool) writer {
+ flags := unix.O_WRONLY | unix.O_TMPFILE | unix.O_CLOEXEC
+ if !noSync {
+ flags |= unix.O_DSYNC
+ }
+ fd, err := unix.Open(root, flags, uint32(perm))
+ if err != nil {
+ // The open failed, which means the OS-specific writeData can't be used
+ // and FSTree should fall back to the generic one.
+ return nil
+ }
+ _ = unix.Close(fd) // Don't care about error.
+ var fileGuard keyLock = &noopKeyLock{}
+ fileCounterEnabled := counterEnabled(c)
+ if fileCounterEnabled {
+ fileGuard = utilSync.NewKeyLocker[string]()
+ }
+ w := &linuxWriter{
+ root: root,
+ perm: uint32(perm),
+ flags: flags,
+ fileGuard: fileGuard,
+ fileCounter: c,
+ fileCounterEnabled: fileCounterEnabled,
+ }
+ return w
+}
+
+func (w *linuxWriter) writeData(p string, data []byte) error {
+ err := w.writeFile(p, data)
+ if errors.Is(err, unix.ENOSPC) {
+ return common.ErrNoSpace
+ }
+ return err
+}
+
+func (w *linuxWriter) writeFile(p string, data []byte) error {
+ if w.fileCounterEnabled {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+ }
+ fd, err := unix.Open(w.root, w.flags, w.perm)
+ if err != nil {
+ return err
+ }
+ written := 0
+ tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10)
+ n, err := unix.Write(fd, data)
+ for err == nil {
+ written += n
+
+ if written == len(data) {
+ err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
+ if err == nil {
+ w.fileCounter.Inc(uint64(len(data)))
+ }
+ if errors.Is(err, unix.EEXIST) {
+ err = nil
+ }
+ break
+ }
+
+ // From man 2 write:
+ // https://www.man7.org/linux/man-pages/man2/write.2.html
+ //
+ // Note that a successful write() may transfer fewer than count
+ // bytes. Such partial writes can occur for various reasons; for
+ // example, because there was insufficient space on the disk device
+ // to write all of the requested bytes, or because a blocked write()
+ // to a socket, pipe, or similar was interrupted by a signal handler
+ // after it had transferred some, but before it had transferred all
+ // of the requested bytes. In the event of a partial write, the
+ // caller can make another write() call to transfer the remaining
+ // bytes. The subsequent call will either transfer further bytes or
+ // may result in an error (e.g., if the disk is now full).
+ n, err = unix.Write(fd, data[written:])
+ }
+ errClose := unix.Close(fd)
+ if err != nil {
+ return err // Close() error is ignored, we have a better one.
+ }
+ return errClose
+}
+
+func (w *linuxWriter) removeFile(p string, size uint64) error {
+ if w.fileCounterEnabled {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+
+ if size == 0 {
+ var stat unix.Stat_t
+ err := unix.Stat(p, &stat)
+ if err != nil {
+ if err == unix.ENOENT {
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return err
+ }
+ size = uint64(stat.Size)
+ }
+ }
+
+ err := unix.Unlink(p)
+ if err != nil && err == unix.ENOENT {
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ if err == nil {
+ w.fileCounter.Dec(size)
+ }
+ return err
+}
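
The Linux writer never exposes a partially written file: O_TMPFILE creates an anonymous inode in the target directory, and linkat through /proc/self/fd publishes it under its final name only after every byte is written. A standalone sketch of the same trick (the partial-write retry loop above is elided here for brevity):

```go
//go:build linux

package main

import (
	"strconv"

	"golang.org/x/sys/unix"
)

// writeAtomic makes data visible at path only once it is fully written.
func writeAtomic(dir, path string, data []byte) error {
	fd, err := unix.Open(dir, unix.O_WRONLY|unix.O_TMPFILE|unix.O_CLOEXEC, 0o644)
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	if _, err := unix.Write(fd, data); err != nil {
		return err
	}
	tmp := "/proc/self/fd/" + strconv.Itoa(fd)
	return unix.Linkat(unix.AT_FDCWD, tmp, unix.AT_FDCWD, path, unix.AT_SYMLINK_FOLLOW)
}
```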
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
new file mode 100644
index 0000000000..7fae2e6951
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
@@ -0,0 +1,42 @@
+//go:build linux && integration
+
+package fstree
+
+import (
+ "context"
+ "errors"
+ "os"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sys/unix"
+)
+
+func TestENOSPC(t *testing.T) {
+ dir, err := os.MkdirTemp(t.TempDir(), "ramdisk")
+ require.NoError(t, err)
+
+ f, err := os.CreateTemp(t.TempDir(), "ramdisk_*")
+ require.NoError(t, err)
+
+ err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M")
+ if errors.Is(err, unix.EPERM) {
+ t.Skipf("skip size tests: no permission to mount: %v", err)
+ return
+ }
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, unix.Unmount(dir, 0))
+ }()
+
+ fst := New(WithPath(dir), WithDepth(1))
+ require.NoError(t, fst.Open(mode.ComponentReadWrite))
+ require.NoError(t, fst.Init())
+
+ _, err = fst.Put(context.Background(), common.PutPrm{
+ RawData: make([]byte, 10<<20),
+ })
+ require.ErrorIs(t, err, common.ErrNoSpace)
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go
new file mode 100644
index 0000000000..67052d9477
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go
@@ -0,0 +1,11 @@
+//go:build !linux || fstree_generic
+
+package fstree
+
+import (
+ "io/fs"
+)
+
+func newSpecificWriteData(_ FileCounter, _ string, _ fs.FileMode, _ bool) writer {
+ return nil
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/generic_test.go b/pkg/local_object_storage/blobstor/fstree/generic_test.go
index 49f487d35e..757482c78f 100644
--- a/pkg/local_object_storage/blobstor/fstree/generic_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/generic_test.go
@@ -1,9 +1,6 @@
package fstree
import (
- "os"
- "path/filepath"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -11,39 +8,31 @@ import (
)
func TestGeneric(t *testing.T) {
- defer func() { _ = os.RemoveAll(t.Name()) }()
-
- helper := func(t *testing.T, dir string) common.Storage {
+ newTreeFromPath := func(path string) common.Storage {
return New(
- WithPath(dir),
+ WithPath(path),
WithDepth(2),
WithDirNameLen(2))
}
- var n int
newTree := func(t *testing.T) common.Storage {
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
- return helper(t, dir)
+ return newTreeFromPath(t.TempDir())
}
blobstortest.TestAll(t, newTree, 2048, 16*1024)
t.Run("info", func(t *testing.T) {
- dir := filepath.Join(t.Name(), "info")
- blobstortest.TestInfo(t, func(t *testing.T) common.Storage {
- return helper(t, dir)
- }, Type, dir)
+ path := t.TempDir()
+ blobstortest.TestInfo(t, func(*testing.T) common.Storage {
+ return newTreeFromPath(path)
+ }, Type, path)
})
}
func TestControl(t *testing.T) {
- defer func() { _ = os.RemoveAll(t.Name()) }()
-
- var n int
newTree := func(t *testing.T) common.Storage {
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
return New(
- WithPath(dir),
+ WithPath(t.TempDir()),
WithDepth(2),
WithDirNameLen(2))
}
diff --git a/pkg/local_object_storage/blobstor/fstree/metrics.go b/pkg/local_object_storage/blobstor/fstree/metrics.go
new file mode 100644
index 0000000000..4241beec97
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/metrics.go
@@ -0,0 +1,37 @@
+package fstree
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+type Metrics interface {
+ SetParentID(parentID string)
+
+ SetMode(mode mode.ComponentMode)
+ Close()
+
+ Iterate(d time.Duration, success bool)
+ IterateInfo(d time.Duration, success bool)
+ Delete(d time.Duration, success bool)
+ Exists(d time.Duration, success bool)
+ Put(d time.Duration, size int, success bool)
+ Get(d time.Duration, size int, success bool)
+ GetRange(d time.Duration, size int, success bool)
+ ObjectsCount(d time.Duration, success bool)
+}
+
+type noopMetrics struct{}
+
+func (m *noopMetrics) SetParentID(string) {}
+func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) Close() {}
+func (m *noopMetrics) Iterate(time.Duration, bool) {}
+func (m *noopMetrics) IterateInfo(time.Duration, bool) {}
+func (m *noopMetrics) Delete(time.Duration, bool) {}
+func (m *noopMetrics) Exists(time.Duration, bool) {}
+func (m *noopMetrics) Put(time.Duration, int, bool) {}
+func (m *noopMetrics) Get(time.Duration, int, bool) {}
+func (m *noopMetrics) GetRange(time.Duration, int, bool) {}
+func (m *noopMetrics) ObjectsCount(time.Duration, bool) {}
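
Anything satisfying this interface can be injected through the WithMetrics option added below. A hedged sketch of a trivial implementation that embeds the no-op type and only counts successful puts and gets (a real implementation would export these values to a metrics registry):

```go
package fstree

import (
	"sync/atomic"
	"time"
)

// countingMetrics is illustrative only.
type countingMetrics struct {
	noopMetrics // inherit no-op implementations for the other methods
	puts, gets  atomic.Uint64
}

func (m *countingMetrics) Put(_ time.Duration, _ int, success bool) {
	if success {
		m.puts.Add(1)
	}
}

func (m *countingMetrics) Get(_ time.Duration, _ int, success bool) {
	if success {
		m.gets.Add(1)
	}
}

var _ Metrics = (*countingMetrics)(nil) // compile-time interface check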
diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go
index 07e5474445..6f2ac87e1d 100644
--- a/pkg/local_object_storage/blobstor/fstree/option.go
+++ b/pkg/local_object_storage/blobstor/fstree/option.go
@@ -2,6 +2,8 @@ package fstree
import (
"io/fs"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)
type Option func(*FSTree)
@@ -35,3 +37,21 @@ func WithNoSync(noSync bool) Option {
f.noSync = noSync
}
}
+
+func WithMetrics(m Metrics) Option {
+ return func(f *FSTree) {
+ f.metrics = m
+ }
+}
+
+func WithFileCounter(c FileCounter) Option {
+ return func(f *FSTree) {
+ f.fileCounter = c
+ }
+}
+
+func WithLogger(l *logger.Logger) Option {
+ return func(f *FSTree) {
+ f.log = l
+ }
+}
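
A usage sketch tying the new options together (the path is illustrative; WithMetrics and WithLogger are wired the same way):

```go
package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
)

// newTree shows the new options in context.
func newTree() common.Storage {
	return fstree.New(
		fstree.WithPath("/srv/frostfs/fstree"), // illustrative path
		fstree.WithDepth(2),
		fstree.WithDirNameLen(2),
		fstree.WithFileCounter(fstree.NewSimpleCounter()), // enables object counting
	)
}
```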
diff --git a/pkg/local_object_storage/blobstor/generic_test.go b/pkg/local_object_storage/blobstor/generic_test.go
index 533efcb554..b58ab8a68c 100644
--- a/pkg/local_object_storage/blobstor/generic_test.go
+++ b/pkg/local_object_storage/blobstor/generic_test.go
@@ -1,23 +1,15 @@
package blobstor
import (
- "os"
- "path/filepath"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
)
func TestGeneric(t *testing.T) {
- defer func() { _ = os.RemoveAll(t.Name()) }()
-
- var n int
newMetabase := func(t *testing.T) storagetest.Component {
- n++
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
return New(
- WithStorages(defaultStorages(dir, 128)))
+ WithStorages(defaultStorages(t.TempDir(), 128)))
}
storagetest.TestAll(t, newMetabase)
diff --git a/pkg/local_object_storage/blobstor/get.go b/pkg/local_object_storage/blobstor/get.go
index 6caa61b84e..d00ef2f21f 100644
--- a/pkg/local_object_storage/blobstor/get.go
+++ b/pkg/local_object_storage/blobstor/get.go
@@ -1,32 +1,53 @@
package blobstor
import (
- "errors"
+ "context"
+ "encoding/hex"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Get reads the object from b.
-// If the descriptor is present, only one sub-storage is tried,
-// Otherwise, each sub-storage is tried in order.
+// If the descriptor is present, only one sub-storage is tried;
+// otherwise, each sub-storage is tried in order.
-func (b *BlobStor) Get(prm common.GetPrm) (common.GetRes, error) {
+func (b *BlobStor) Get(ctx context.Context, prm common.GetPrm) (res common.GetRes, err error) {
+ startedAt := time.Now()
+ defer func() {
+ b.metrics.Get(time.Since(startedAt), len(res.RawData), err == nil, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("raw", prm.Raw),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
- res, err := b.storage[i].Storage.Get(prm)
- if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
+ res, err = b.storage[i].Storage.Get(ctx, prm)
+ if err == nil || !client.IsErrObjectNotFound(err) {
return res, err
}
}
- return common.GetRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if len(prm.StorageID) == 0 {
- return b.storage[len(b.storage)-1].Storage.Get(prm)
+ res, err = b.storage[len(b.storage)-1].Storage.Get(ctx, prm)
+ } else {
+ res, err = b.storage[0].Storage.Get(ctx, prm)
}
- return b.storage[0].Storage.Get(prm)
+ return res, err
}
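
A hedged usage sketch of the context-aware Get: a nil StorageID searches every sub-storage in order, while a non-nil empty one selects the last sub-storage directly:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

func getObject(ctx context.Context, b *blobstor.BlobStor, addr oid.Address) (common.GetRes, error) {
	var prm common.GetPrm
	prm.Address = addr
	prm.StorageID = nil // nil: try each sub-storage until one succeeds
	return b.Get(ctx, prm)
}
```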
diff --git a/pkg/local_object_storage/blobstor/get_range.go b/pkg/local_object_storage/blobstor/get_range.go
index 93939cabb0..9bded4720d 100644
--- a/pkg/local_object_storage/blobstor/get_range.go
+++ b/pkg/local_object_storage/blobstor/get_range.go
@@ -1,32 +1,55 @@
package blobstor
import (
- "errors"
+ "context"
+ "encoding/hex"
+ "strconv"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// GetRange reads object payload data from b.
-// If the descriptor is present, only one sub-storage is tried,
-// Otherwise, each sub-storage is tried in order.
+// If the descriptor is present, only one sub-storage is tried;
+// otherwise, each sub-storage is tried in order.
-func (b *BlobStor) GetRange(prm common.GetRangePrm) (common.GetRangeRes, error) {
+func (b *BlobStor) GetRange(ctx context.Context, prm common.GetRangePrm) (res common.GetRangeRes, err error) {
+ startedAt := time.Now()
+ defer func() {
+ b.metrics.GetRange(time.Since(startedAt), len(res.Data), err == nil, prm.StorageID != nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.GetRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)),
+ attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
- res, err := b.storage[i].Storage.GetRange(prm)
- if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
+ res, err = b.storage[i].Storage.GetRange(ctx, prm)
+ if err == nil || !client.IsErrObjectNotFound(err) {
return res, err
}
}
- return common.GetRangeRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if len(prm.StorageID) == 0 {
- return b.storage[len(b.storage)-1].Storage.GetRange(prm)
+ res, err = b.storage[len(b.storage)-1].Storage.GetRange(ctx, prm)
+ } else {
+ res, err = b.storage[0].Storage.GetRange(ctx, prm)
}
- return b.storage[0].Storage.GetRange(prm)
+ return res, err
}
diff --git a/pkg/local_object_storage/blobstor/info.go b/pkg/local_object_storage/blobstor/info.go
index 2fd62af818..c1c47f3bb1 100644
--- a/pkg/local_object_storage/blobstor/info.go
+++ b/pkg/local_object_storage/blobstor/info.go
@@ -1,5 +1,14 @@
package blobstor
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "golang.org/x/sync/errgroup"
+)
+
// DumpInfo returns information about blob stor.
func (b *BlobStor) DumpInfo() Info {
b.modeMtx.RLock()
@@ -15,3 +24,38 @@ func (b *BlobStor) DumpInfo() Info {
SubStorages: sub,
}
}
+
+// ObjectsCount returns Blobstore's total objects count.
+func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) {
+ var err error
+ startedAt := time.Now()
+ defer func() {
+ b.metrics.ObjectsCount(time.Since(startedAt), err == nil)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.ObjectsCount")
+ defer span.End()
+
+ b.modeMtx.RLock()
+ defer b.modeMtx.RUnlock()
+
+ var result atomic.Uint64
+
+ eg, egCtx := errgroup.WithContext(ctx)
+ for i := range b.storage {
+ eg.Go(func() error {
+ v, e := b.storage[i].Storage.ObjectsCount(egCtx)
+ if e != nil {
+ return e
+ }
+ result.Add(v)
+ return nil
+ })
+ }
+
+ if err = eg.Wait(); err != nil {
+ return 0, err
+ }
+
+ return result.Load(), nil
+}
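
The counting fans out to all sub-storages concurrently via errgroup and fails as a whole if any sub-storage fails. A brief usage sketch:

```go
package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
)

func printObjectsCount(ctx context.Context, b *blobstor.BlobStor) error {
	total, err := b.ObjectsCount(ctx)
	if err != nil {
		return err // any failing sub-storage fails the aggregate
	}
	fmt.Println("objects stored:", total)
	return nil
}
```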
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
index b2663be217..5d14a9a3ae 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
@@ -1,8 +1,10 @@
package blobstortest
import (
- "math/rand"
+ "context"
+ mrand "math/rand"
"testing"
+ "time"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -25,21 +27,21 @@ type objectDesc struct {
storageID []byte
}
-func TestAll(t *testing.T, cons Constructor, min, max uint64) {
+func TestAll(t *testing.T, cons Constructor, minSize, maxSize uint64) {
t.Run("get", func(t *testing.T) {
- TestGet(t, cons, min, max)
+ TestGet(t, cons, minSize, maxSize)
})
t.Run("get range", func(t *testing.T) {
- TestGetRange(t, cons, min, max)
+ TestGetRange(t, cons, minSize, maxSize)
})
t.Run("delete", func(t *testing.T) {
- TestDelete(t, cons, min, max)
+ TestDelete(t, cons, minSize, maxSize)
})
t.Run("exists", func(t *testing.T) {
- TestExists(t, cons, min, max)
+ TestExists(t, cons, minSize, maxSize)
})
t.Run("iterate", func(t *testing.T) {
- TestIterate(t, cons, min, max)
+ TestIterate(t, cons, minSize, maxSize)
})
}
@@ -49,11 +51,12 @@ func TestInfo(t *testing.T, cons Constructor, expectedType string, expectedPath
require.Equal(t, expectedPath, s.Path())
}
-func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objectDesc {
+func prepare(t *testing.T, count int, s common.Storage, minSize, maxSize uint64) []objectDesc {
objects := make([]objectDesc, count)
+ r := mrand.New(mrand.NewSource(0))
for i := range objects {
- objects[i].obj = NewObject(min + uint64(rand.Intn(int(max-min+1)))) // not too large
+ objects[i].obj = NewObject(minSize + uint64(r.Intn(int(maxSize-minSize+1)))) // not too large
objects[i].addr = objectCore.AddressOf(objects[i].obj)
raw, err := objects[i].obj.Marshal()
@@ -67,7 +70,7 @@ func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objec
prm.Object = objects[i].obj
prm.RawData = objects[i].raw
- putRes, err := s.Put(prm)
+ putRes, err := s.Put(context.Background(), prm)
require.NoError(t, err)
objects[i].storageID = putRes.StorageID
@@ -84,7 +87,8 @@ func NewObject(sz uint64) *objectSDK.Object {
raw.SetContainerID(cidtest.ID())
payload := make([]byte, sz)
- rand.Read(payload)
+ r := mrand.New(mrand.NewSource(time.Now().UnixNano()))
+ r.Read(payload)
raw.SetPayload(payload)
// fit the binary size to the required
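
Note the two different seeding strategies above: prepare derives object sizes from a fixed seed so runs are reproducible, while NewObject seeds payload contents from the clock. A small standalone sketch of the distinction:

```go
package main

import (
	mrand "math/rand"
	"time"
)

// Fixed seed: the same size sequence on every run, which keeps test
// failures reproducible.
func sizes(n int, minSize, maxSize uint64) []uint64 {
	r := mrand.New(mrand.NewSource(0))
	out := make([]uint64, n)
	for i := range out {
		out[i] = minSize + uint64(r.Intn(int(maxSize-minSize+1)))
	}
	return out
}

// Clock seed: payload contents differ between runs.
func payload(sz uint64) []byte {
	r := mrand.New(mrand.NewSource(time.Now().UnixNano()))
	p := make([]byte, sz)
	r.Read(p)
	return p
}
```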
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
index 0a74495d70..b8e88f84a0 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
@@ -1,41 +1,43 @@
package blobstortest
import (
+ "context"
"math/rand"
"testing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
)
// TestControl checks correctness of a read-only mode.
// cons must return a storage which is NOT opened.
-func TestControl(t *testing.T, cons Constructor, min, max uint64) {
+func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
- require.NoError(t, s.Open(false))
+ require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- objects := prepare(t, 10, s, min, max)
- require.NoError(t, s.Close())
+ objects := prepare(t, 10, s, minSize, maxSize)
+ require.NoError(t, s.Close(context.Background()))
- require.NoError(t, s.Open(true))
+ require.NoError(t, s.Open(mode.ComponentReadOnly))
for i := range objects {
var prm common.GetPrm
prm.Address = objects[i].addr
prm.StorageID = objects[i].storageID
prm.Raw = true
- _, err := s.Get(prm)
+ _, err := s.Get(context.Background(), prm)
require.NoError(t, err)
}
t.Run("put fails", func(t *testing.T) {
var prm common.PutPrm
- prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1))))
+ prm.Object = NewObject(minSize + uint64(rand.Intn(int(maxSize-minSize+1))))
prm.Address = objectCore.AddressOf(prm.Object)
- _, err := s.Put(prm)
+ _, err := s.Put(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
t.Run("delete fails", func(t *testing.T) {
@@ -43,7 +45,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
index f3bb4c3f26..3a163f6b17 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
@@ -1,28 +1,30 @@
package blobstortest
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
+func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
- require.NoError(t, s.Open(false))
+ require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- t.Cleanup(func() { require.NoError(t, s.Close()) })
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
- objects := prepare(t, 4, s, min, max)
+ objects := prepare(t, 4, s, minSize, maxSize)
t.Run("delete non-existent", func(t *testing.T) {
var prm common.DeletePrm
prm.Address = oidtest.Address()
- _, err := s.Delete(prm)
- require.Error(t, err, new(apistatus.ObjectNotFound))
+ _, err := s.Delete(context.Background(), prm)
+ require.True(t, client.IsErrObjectNotFound(err))
})
t.Run("with storage ID", func(t *testing.T) {
@@ -30,31 +32,31 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
t.Run("exists fail", func(t *testing.T) {
prm := common.ExistsPrm{Address: oidtest.Address()}
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Exists)
})
t.Run("get fail", func(t *testing.T) {
prm := common.GetPrm{Address: oidtest.Address()}
- _, err := s.Get(prm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err := s.Get(context.Background(), prm)
+ require.True(t, client.IsErrObjectNotFound(err))
})
t.Run("getrange fail", func(t *testing.T) {
prm := common.GetRangePrm{Address: oidtest.Address()}
- _, err := s.GetRange(prm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err := s.GetRange(context.Background(), prm)
+ require.True(t, client.IsErrObjectNotFound(err))
})
})
t.Run("without storage ID", func(t *testing.T) {
var prm common.DeletePrm
prm.Address = objects[1].addr
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
})
@@ -63,11 +65,11 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[2].addr
prm.StorageID = objects[2].storageID
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
- _, err = s.Delete(prm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err = s.Delete(context.Background(), prm)
+ require.True(t, client.IsErrObjectNotFound(err))
})
t.Run("non-deleted object is still available", func(t *testing.T) {
@@ -75,7 +77,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[3].addr
prm.Raw = true
- res, err := s.Get(prm)
+ res, err := s.Get(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, objects[3].raw, res.RawData)
})
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
index ee16ddcb32..f34fe5f97e 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
@@ -1,24 +1,26 @@
package blobstortest
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestExists(t *testing.T, cons Constructor, min, max uint64) {
+func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
- require.NoError(t, s.Open(false))
+ require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- t.Cleanup(func() { require.NoError(t, s.Close()) })
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
- objects := prepare(t, 1, s, min, max)
+ objects := prepare(t, 1, s, minSize, maxSize)
t.Run("missing object", func(t *testing.T) {
prm := common.ExistsPrm{Address: oidtest.Address()}
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Exists)
})
@@ -29,7 +31,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
t.Run("without storage ID", func(t *testing.T) {
prm.StorageID = nil
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
})
@@ -37,7 +39,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
t.Run("with storage ID", func(t *testing.T) {
prm.StorageID = objects[0].storageID
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
})
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
index cc3da6b4bf..af0f4b45dd 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
@@ -1,26 +1,28 @@
package blobstortest
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestGet(t *testing.T, cons Constructor, min, max uint64) {
+func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
- require.NoError(t, s.Open(false))
+ require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- t.Cleanup(func() { require.NoError(t, s.Close()) })
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
- objects := prepare(t, 2, s, min, max)
+ objects := prepare(t, 2, s, minSize, maxSize)
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetPrm{Address: oidtest.Address()}
- _, err := s.Get(gPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err := s.Get(context.Background(), gPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
})
for i := range objects {
@@ -29,13 +31,13 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
// With storage ID.
gPrm.StorageID = objects[i].storageID
- res, err := s.Get(gPrm)
+ res, err := s.Get(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, objects[i].obj, res.Object)
// Without storage ID.
gPrm.StorageID = nil
- res, err = s.Get(gPrm)
+ res, err = s.Get(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, objects[i].obj, res.Object)
@@ -43,7 +45,7 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
gPrm.StorageID = objects[i].storageID
gPrm.Raw = true
- res, err = s.Get(gPrm)
+ res, err = s.Get(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, objects[i].raw, res.RawData)
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
index 9e4b1a4490..13032048c9 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
@@ -1,34 +1,37 @@
package blobstortest
import (
+ "context"
"math"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
+func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
- require.NoError(t, s.Open(false))
+ require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- t.Cleanup(func() { require.NoError(t, s.Close()) })
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
- objects := prepare(t, 1, s, min, max)
+ objects := prepare(t, 1, s, minSize, maxSize)
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetRangePrm{Address: oidtest.Address()}
- _, err := s.GetRange(gPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err := s.GetRange(context.Background(), gPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
})
payload := objects[0].obj.Payload()
var start, stop uint64 = 11, 100
if uint64(len(payload)) < stop {
- panic("unexpected: invalid test object generated")
+ t.Fatal("unexpected: invalid test object generated")
}
var gPrm common.GetRangePrm
@@ -38,14 +41,14 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
t.Run("without storage ID", func(t *testing.T) {
// Without storage ID.
- res, err := s.GetRange(gPrm)
+ res, err := s.GetRange(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, payload[start:stop], res.Data)
})
t.Run("with storage ID", func(t *testing.T) {
gPrm.StorageID = objects[0].storageID
- res, err := s.GetRange(gPrm)
+ res, err := s.GetRange(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, payload[start:stop], res.Data)
})
@@ -54,31 +57,31 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
gPrm.Range.SetOffset(uint64(len(payload) + 10))
gPrm.Range.SetLength(10)
- _, err := s.GetRange(gPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
+ _, err := s.GetRange(context.Background(), gPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange))
})
t.Run("offset + length > len(payload)", func(t *testing.T) {
gPrm.Range.SetOffset(10)
gPrm.Range.SetLength(uint64(len(payload)))
- _, err := s.GetRange(gPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
+ _, err := s.GetRange(context.Background(), gPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange))
})
t.Run("length is negative when converted to int64", func(t *testing.T) {
gPrm.Range.SetOffset(0)
gPrm.Range.SetLength(1 << 63)
- _, err := s.GetRange(gPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
+ _, err := s.GetRange(context.Background(), gPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange))
})
t.Run("offset + length overflow uint64", func(t *testing.T) {
gPrm.Range.SetOffset(10)
gPrm.Range.SetLength(math.MaxUint64 - 2)
- _, err := s.GetRange(gPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
+ _, err := s.GetRange(context.Background(), gPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange))
})
}
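
The assertion targets change from new(apistatus.ObjectOutOfRange) to new(*apistatus.ObjectOutOfRange) because the SDK status errors are now returned as pointers, and errors.As requires the target type to match. A minimal standalone illustration (ObjectOutOfRange here is a stand-in type, not the SDK one):

```go
package main

import (
	"errors"
	"fmt"
)

// ObjectOutOfRange stands in for apistatus.ObjectOutOfRange.
type ObjectOutOfRange struct{}

func (e *ObjectOutOfRange) Error() string { return "out of range" }

func main() {
	var err error = &ObjectOutOfRange{} // errors are now pointers

	// The target must be **ObjectOutOfRange, i.e. new(*ObjectOutOfRange).
	fmt.Println(errors.As(err, new(*ObjectOutOfRange))) // true

	// new(ObjectOutOfRange) would compile but panic at runtime, because
	// the value type does not implement error (Error has a pointer receiver).
}
```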
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index 231df3effc..d54c54f59d 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -1,32 +1,40 @@
package blobstortest
import (
+ "context"
"errors"
+ "slices"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
)
-func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
+func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
- require.NoError(t, s.Open(false))
+ require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- t.Cleanup(func() { require.NoError(t, s.Close()) })
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
- objects := prepare(t, 10, s, min, max)
+ objects := prepare(t, 10, s, minSize, maxSize)
// Delete random object to ensure it is not iterated over.
const delID = 2
var delPrm common.DeletePrm
delPrm.Address = objects[2].addr
delPrm.StorageID = objects[2].storageID
- _, err := s.Delete(delPrm)
+ _, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)
- objects = append(objects[:delID], objects[delID+1:]...)
+ objects = slices.Delete(objects, delID, delID+1)
+ runTestNormalHandler(t, s, objects)
+
+ runTestIgnoreLogicalErrors(t, s, objects)
+}
+
+func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) {
t.Run("normal handler", func(t *testing.T) {
seen := make(map[string]objectDesc)
@@ -40,9 +48,9 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
return nil
}
- _, err := s.Iterate(iterPrm)
+ _, err := s.Iterate(context.Background(), iterPrm)
require.NoError(t, err)
- require.Equal(t, len(objects), len(seen))
+ require.Len(t, seen, len(objects))
for i := range objects {
d, ok := seen[objects[i].addr.String()]
require.True(t, ok)
@@ -51,34 +59,14 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
require.Equal(t, objects[i].storageID, d.storageID)
}
})
+}
- t.Run("lazy handler", func(t *testing.T) {
- seen := make(map[string]func() ([]byte, error))
-
- var iterPrm common.IteratePrm
- iterPrm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
- seen[addr.String()] = f
- return nil
- }
-
- _, err := s.Iterate(iterPrm)
- require.NoError(t, err)
- require.Equal(t, len(objects), len(seen))
- for i := range objects {
- f, ok := seen[objects[i].addr.String()]
- require.True(t, ok)
-
- data, err := f()
- require.NoError(t, err)
- require.Equal(t, objects[i].raw, data)
- }
- })
-
+func runTestIgnoreLogicalErrors(t *testing.T, s common.Storage, objects []objectDesc) {
t.Run("ignore errors doesn't work for logical errors", func(t *testing.T) {
seen := make(map[string]objectDesc)
var n int
- var logicErr = errors.New("logic error")
+ logicErr := errors.New("logic error")
var iterPrm common.IteratePrm
iterPrm.IgnoreErrors = true
iterPrm.Handler = func(elem common.IterationElement) error {
@@ -94,9 +82,9 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
return nil
}
- _, err := s.Iterate(iterPrm)
+ _, err := s.Iterate(context.Background(), iterPrm)
require.Equal(t, err, logicErr)
- require.Equal(t, len(objects)/2, len(seen))
+ require.Len(t, seen, len(objects)/2)
for i := range objects {
d, ok := seen[objects[i].addr.String()]
if ok {
diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go
index 0461dd8031..ff1aa9d640 100644
--- a/pkg/local_object_storage/blobstor/iterate.go
+++ b/pkg/local_object_storage/blobstor/iterate.go
@@ -1,10 +1,16 @@
package blobstor
import (
+ "context"
"fmt"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -15,36 +21,51 @@ import (
// did not allow to completely iterate over the storage.
//
// If handler returns an error, method wraps and returns it immediately.
-func (b *BlobStor) Iterate(prm common.IteratePrm) (common.IterateRes, error) {
+func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ b.metrics.Iterate(time.Since(startedAt), success)
+ }()
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Iterate",
+ trace.WithAttributes(
+ attribute.Bool("ignore_errors", prm.IgnoreErrors),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
for i := range b.storage {
- _, err := b.storage[i].Storage.Iterate(prm)
- if err != nil && !prm.IgnoreErrors {
+ _, err := b.storage[i].Storage.Iterate(ctx, prm)
+ if err != nil {
+ if prm.IgnoreErrors {
+ b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.String("storage_path", b.storage[i].Storage.Path()),
+ zap.String("storage_type", b.storage[i].Storage.Type()),
+ zap.Error(err))
+ continue
+ }
return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
}
}
+ success = true
return common.IterateRes{}, nil
}
// IterateBinaryObjects is a helper function which iterates over BlobStor and passes binary objects to f.
// Errors related to object reading and unmarshaling are logged and skipped.
-func IterateBinaryObjects(blz *BlobStor, f func(addr oid.Address, data []byte, descriptor []byte) error) error {
+func IterateBinaryObjects(ctx context.Context, blz *BlobStor, f func(addr oid.Address, data []byte, descriptor []byte) error) error {
var prm common.IteratePrm
prm.Handler = func(elem common.IterationElement) error {
return f(elem.Address, elem.ObjectData, elem.StorageID)
}
prm.IgnoreErrors = true
- prm.ErrorHandler = func(addr oid.Address, err error) error {
- blz.log.Warn("error occurred during the iteration",
- zap.Stringer("address", addr),
- zap.String("err", err.Error()))
- return nil
- }
- _, err := blz.Iterate(prm)
+ _, err := blz.Iterate(ctx, prm)
return err
}
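
With the per-caller ErrorHandler hook gone, read and unmarshaling failures are logged inside Iterate itself (the Warn call above). A hedged usage sketch of the helper:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// countBinaryObjects counts objects via IterateBinaryObjects; unreadable
// entries are skipped internally because the helper sets IgnoreErrors.
func countBinaryObjects(ctx context.Context, b *blobstor.BlobStor) (int, error) {
	var n int
	err := blobstor.IterateBinaryObjects(ctx, b, func(oid.Address, []byte, []byte) error {
		n++
		return nil
	})
	return n, err
}
```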
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index b2a7ddfb96..2786321a82 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -1,11 +1,17 @@
package blobstor
import (
+ "context"
"encoding/binary"
+ "errors"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -19,18 +25,20 @@ func TestIterateObjects(t *testing.T) {
// create BlobStor instance
blobStor := New(
WithStorages(defaultStorages(p, smalSz)),
- WithCompressObjects(true),
+ WithCompression(compression.Config{
+ Enabled: true,
+ }),
)
defer os.RemoveAll(p)
// open Blobstor
- require.NoError(t, blobStor.Open(false))
+ require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
// initialize Blobstor
- require.NoError(t, blobStor.Init())
+ require.NoError(t, blobStor.Init(context.Background()))
- defer blobStor.Close()
+ defer blobStor.Close(context.Background())
const objNum = 5
@@ -42,7 +50,7 @@ func TestIterateObjects(t *testing.T) {
mObjs := make(map[string]addrData)
- for i := uint64(0); i < objNum; i++ {
+ for i := range uint64(objNum) {
sz := smalSz
big := i < objNum/2
@@ -63,11 +71,11 @@ func TestIterateObjects(t *testing.T) {
}
for _, v := range mObjs {
- _, err := blobStor.Put(common.PutPrm{Address: v.addr, RawData: v.data})
+ _, err := blobStor.Put(context.Background(), common.PutPrm{Address: v.addr, RawData: v.data})
require.NoError(t, err)
}
- err := IterateBinaryObjects(blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
+ err := IterateBinaryObjects(context.Background(), blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
v, ok := mObjs[string(data)]
require.True(t, ok)
@@ -88,117 +96,60 @@ func TestIterateObjects(t *testing.T) {
}
func TestIterate_IgnoreErrors(t *testing.T) {
- t.Skip()
- // dir := t.TempDir()
- //
- // const (
- // smallSize = 512
- // objCount = 5
- // )
- // bsOpts := []Option{
- // WithCompressObjects(true),
- // WithRootPath(dir),
- // WithSmallSizeLimit(smallSize * 2), // + header
- // WithBlobovniczaOpenedCacheSize(1),
- // WithBlobovniczaShallowWidth(1),
- // WithBlobovniczaShallowDepth(1)}
- // bs := New(bsOpts...)
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // addrs := make([]oid.Address, objCount)
- // for i := range addrs {
- // addrs[i] = oidtest.Address()
- //
- // obj := object.New()
- // obj.SetContainerID(addrs[i].Container())
- // obj.SetID(addrs[i].Object())
- // obj.SetPayload(make([]byte, smallSize<<(i%2)))
- //
- // objData, err := obj.Marshal()
- // require.NoError(t, err)
- //
- // _, err = bs.PutRaw(addrs[i], objData, true)
- // require.NoError(t, err)
- // }
- //
- // // Construct corrupted compressed object.
- // buf := bytes.NewBuffer(nil)
- // badObject := make([]byte, smallSize/2+1)
- // enc, err := zstd.NewWriter(buf)
- // require.NoError(t, err)
- // rawData := enc.EncodeAll(badObject, nil)
- // for i := 4; /* magic size */ i < len(rawData); i += 2 {
- // rawData[i] ^= 0xFF
- // }
- // // Will be put uncompressed but fetched as compressed because of magic.
- // _, err = bs.PutRaw(oidtest.Address(), rawData, false)
- // require.NoError(t, err)
- // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData))
- //
- // require.NoError(t, bs.Close())
- //
- // // Increase width to have blobovnicza which is definitely empty.
- // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...)
- // require.NoError(t, b.Open(false))
- // require.NoError(t, b.Init())
- //
- // var p string
- // for i := 0; i < 2; i++ {
- // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10))
- // if _, ok := bs.blobovniczas.opened.Get(bp); !ok {
- // p = bp
- // break
- // }
- // }
- // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache")
- // require.NoError(t, os.Chmod(p, 0))
- //
- // require.NoError(t, b.Close())
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // var prm IteratePrm
- // prm.SetIterationHandler(func(e IterationElement) error {
- // return nil
- // })
- // _, err = bs.Iterate(prm)
- // require.Error(t, err)
- //
- // prm.IgnoreErrors()
- //
- // t.Run("skip invalid objects", func(t *testing.T) {
- // actual := make([]oid.Address, 0, len(addrs))
- // prm.SetIterationHandler(func(e IterationElement) error {
- // obj := object.New()
- // err := obj.Unmarshal(e.data)
- // if err != nil {
- // return err
- // }
- //
- // var addr oid.Address
- // cnr, _ := obj.ContainerID()
- // addr.SetContainer(cnr)
- // id, _ := obj.ID()
- // addr.SetObject(id)
- // actual = append(actual, addr)
- // return nil
- // })
- //
- // _, err := bs.Iterate(prm)
- // require.NoError(t, err)
- // require.ElementsMatch(t, addrs, actual)
- // })
- // t.Run("return errors from handler", func(t *testing.T) {
- // n := 0
- // expectedErr := errors.New("expected error")
- // prm.SetIterationHandler(func(e IterationElement) error {
- // if n++; n == objCount/2 {
- // return expectedErr
- // }
- // return nil
- // })
- // _, err := bs.Iterate(prm)
- // require.ErrorIs(t, err, expectedErr)
- // })
+ ctx := context.Background()
+
+ myErr := errors.New("unique error")
+ nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil }
+ panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") }
+ errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr }
+
+ var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error)
+ st1 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s1iter(prm)
+ }))
+ st2 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s2iter(prm)
+ }))
+
+ bsOpts := []Option{WithStorages([]SubStorage{
+ {Storage: st1},
+ {Storage: st2},
+ })}
+ bs := New(bsOpts...)
+ require.NoError(t, bs.Open(ctx, mode.ReadWrite))
+ require.NoError(t, bs.Init(ctx))
+
+ nopHandler := func(e common.IterationElement) error {
+ return nil
+ }
+
+ t.Run("no errors", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = panicIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.ErrorIs(t, err, myErr)
+ })
+
+ t.Run("ignore errors, storage 1", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("ignore errors, storage 2", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = errIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
}
diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go
index f201f18d74..070b1eac9d 100644
--- a/pkg/local_object_storage/blobstor/logger.go
+++ b/pkg/local_object_storage/blobstor/logger.go
@@ -1,16 +1,20 @@
package blobstor
import (
+ "context"
+
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
-const deleteOp = "DELETE"
-const putOp = "PUT"
+const (
+ deleteOp = "DELETE"
+ putOp = "PUT"
+)
-func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
- storagelog.Write(l,
+func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
+ storagelog.Write(ctx, l,
storagelog.AddressField(addr),
storagelog.OpField(op),
storagelog.StorageTypeField(typ),
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go
index 4deb9f6e2b..3df96a1c36 100644
--- a/pkg/local_object_storage/blobstor/memstore/control.go
+++ b/pkg/local_object_storage/blobstor/memstore/control.go
@@ -1,15 +1,22 @@
package memstore
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+import (
+ "context"
-func (s *memstoreImpl) Open(readOnly bool) error {
- s.readOnly = readOnly
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+func (s *memstoreImpl) Open(mod mode.ComponentMode) error {
+ s.readOnly = mod.ReadOnly()
return nil
}
-func (s *memstoreImpl) Init() error { return nil }
-func (s *memstoreImpl) Close() error { return nil }
-func (s *memstoreImpl) Type() string { return Type }
-func (s *memstoreImpl) Path() string { return s.rootPath }
-func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
-func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f }
+func (s *memstoreImpl) Init() error { return nil }
+func (s *memstoreImpl) Close(context.Context) error { return nil }
+func (s *memstoreImpl) Type() string { return Type }
+func (s *memstoreImpl) Path() string { return s.rootPath }
+func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc }
+func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression }
+func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {}
+func (s *memstoreImpl) SetParentID(string) {}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go
index 12a8129750..7ef7e37a40 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore.go
@@ -2,6 +2,7 @@
package memstore
import (
+ "context"
"fmt"
"sync"
@@ -32,7 +33,7 @@ func New(opts ...Option) common.Storage {
return st
}
-func (s *memstoreImpl) Get(req common.GetPrm) (common.GetRes, error) {
+func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes, error) {
key := req.Address.EncodeToString()
s.mu.RLock()
@@ -40,26 +41,26 @@ func (s *memstoreImpl) Get(req common.GetPrm) (common.GetRes, error) {
s.mu.RUnlock()
if !exists {
- return common.GetRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
// Decompress the data.
var err error
if data, err = s.compression.Decompress(data); err != nil {
- return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
}
// Unmarshal the SDK object.
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
}
-func (s *memstoreImpl) GetRange(req common.GetRangePrm) (common.GetRangeRes, error) {
- getResp, err := s.Get(common.GetPrm{
+func (s *memstoreImpl) GetRange(ctx context.Context, req common.GetRangePrm) (common.GetRangeRes, error) {
+ getResp, err := s.Get(ctx, common.GetPrm{
Address: req.Address,
StorageID: req.StorageID,
})
@@ -72,7 +73,7 @@ func (s *memstoreImpl) GetRange(req common.GetRangePrm) (common.GetRangeRes, err
to := from + req.Range.GetLength()
if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
- return common.GetRangeRes{}, logicerr.Wrap(apistatus.ObjectOutOfRange{})
+ return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange))
}
return common.GetRangeRes{
@@ -80,7 +81,7 @@ func (s *memstoreImpl) GetRange(req common.GetRangePrm) (common.GetRangeRes, err
}, nil
}
-func (s *memstoreImpl) Exists(req common.ExistsPrm) (common.ExistsRes, error) {
+func (s *memstoreImpl) Exists(_ context.Context, req common.ExistsPrm) (common.ExistsRes, error) {
key := req.Address.EncodeToString()
s.mu.RLock()
@@ -88,10 +89,9 @@ func (s *memstoreImpl) Exists(req common.ExistsPrm) (common.ExistsRes, error) {
_, exists := s.objs[key]
return common.ExistsRes{Exists: exists}, nil
-
}
-func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
+func (s *memstoreImpl) Put(_ context.Context, req common.PutPrm) (common.PutRes, error) {
if s.readOnly {
return common.PutRes{}, common.ErrReadOnly
}
@@ -108,7 +108,7 @@ func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
return common.PutRes{StorageID: []byte(s.rootPath)}, nil
}
-func (s *memstoreImpl) Delete(req common.DeletePrm) (common.DeleteRes, error) {
+func (s *memstoreImpl) Delete(_ context.Context, req common.DeletePrm) (common.DeleteRes, error) {
if s.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
@@ -123,28 +123,25 @@ func (s *memstoreImpl) Delete(req common.DeletePrm) (common.DeleteRes, error) {
return common.DeleteRes{}, nil
}
- return common.DeleteRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
-func (s *memstoreImpl) Iterate(req common.IteratePrm) (common.IterateRes, error) {
+func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common.IterateRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
for k, v := range s.objs {
elem := common.IterationElement{
ObjectData: v,
}
- if err := elem.Address.DecodeString(string(k)); err != nil {
+ if err := elem.Address.DecodeString(k); err != nil {
if req.IgnoreErrors {
continue
}
- return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err))
+ return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err))
}
var err error
if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil {
if req.IgnoreErrors {
- if req.ErrorHandler != nil {
- return common.IterateRes{}, req.ErrorHandler(elem.Address, err)
- }
continue
}
return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decompressing data for address %q: %v", s, elem.Address.String(), err))
@@ -154,10 +151,6 @@ func (s *memstoreImpl) Iterate(req common.IteratePrm) (common.IterateRes, error)
if err := req.Handler(elem); err != nil {
return common.IterateRes{}, err
}
- case req.LazyHandler != nil:
- if err := req.LazyHandler(elem.Address, func() ([]byte, error) { return elem.ObjectData, nil }); err != nil {
- return common.IterateRes{}, err
- }
default:
if !req.IgnoreErrors {
- return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) no Handler or LazyHandler set for IteratePrm", s))
+ return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) no Handler set for IteratePrm", s))
@@ -166,3 +159,14 @@ func (s *memstoreImpl) Iterate(req common.IteratePrm) (common.IterateRes, error)
}
return common.IterateRes{}, nil
}
+
+func (s *memstoreImpl) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) {
+ return common.RebuildRes{}, nil
+}
+
+func (s *memstoreImpl) ObjectsCount(_ context.Context) (uint64, error) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return uint64(len(s.objs)), nil
+}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index 531a7d9e76..f904d42323 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -1,24 +1,23 @@
package memstore
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
)
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
- t.Cleanup(func() { _ = s.Close() })
- require.NoError(t, s.Open(false))
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+ require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
obj := blobstortest.NewObject(1024)
@@ -27,18 +26,18 @@ func TestSimpleLifecycle(t *testing.T) {
require.NoError(t, err)
{
- _, err := s.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
+ _, err := s.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
}
{
- resp, err := s.Exists(common.ExistsPrm{Address: addr})
+ resp, err := s.Exists(context.Background(), common.ExistsPrm{Address: addr})
require.NoError(t, err)
require.True(t, resp.Exists)
}
{
- resp, err := s.Get(common.GetPrm{Address: addr})
+ resp, err := s.Get(context.Background(), common.GetPrm{Address: addr})
require.NoError(t, err)
require.Equal(t, obj.Payload(), resp.Object.Payload())
}
@@ -47,7 +46,7 @@ func TestSimpleLifecycle(t *testing.T) {
var objRange objectSDK.Range
objRange.SetOffset(256)
objRange.SetLength(512)
- resp, err := s.GetRange(common.GetRangePrm{
+ resp, err := s.GetRange(context.Background(), common.GetRangePrm{
Address: addr,
Range: objRange,
})
@@ -56,12 +55,12 @@ func TestSimpleLifecycle(t *testing.T) {
}
{
- _, err := s.Delete(common.DeletePrm{Address: addr})
+ _, err := s.Delete(context.Background(), common.DeletePrm{Address: addr})
require.NoError(t, err)
}
{
- resp, err := s.Exists(common.ExistsPrm{Address: addr})
+ resp, err := s.Exists(context.Background(), common.ExistsPrm{Address: addr})
require.NoError(t, err)
require.False(t, resp.Exists)
}
diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go
index 3d67b1e9cd..7605af4e59 100644
--- a/pkg/local_object_storage/blobstor/memstore/option.go
+++ b/pkg/local_object_storage/blobstor/memstore/option.go
@@ -2,33 +2,20 @@ package memstore
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
type cfg struct {
- log *logger.Logger
rootPath string
readOnly bool
- compression *compression.Config
- reportError func(string, error)
+ compression *compression.Compressor
}
func defaultConfig() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- reportError: func(string, error) {},
- }
+ return &cfg{}
}
type Option func(*cfg)
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
-
func WithRootPath(p string) Option {
return func(c *cfg) {
c.rootPath = p
diff --git a/pkg/local_object_storage/blobstor/metrics.go b/pkg/local_object_storage/blobstor/metrics.go
new file mode 100644
index 0000000000..aadc237af0
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/metrics.go
@@ -0,0 +1,30 @@
+package blobstor
+
+import "time"
+
+type Metrics interface {
+ SetParentID(parentID string)
+ SetMode(readOnly bool)
+ Close()
+
+ Delete(d time.Duration, success, withStorageID bool)
+ Exists(d time.Duration, success, withStorageID bool)
+ GetRange(d time.Duration, size int, success, withStorageID bool)
+ Get(d time.Duration, size int, success, withStorageID bool)
+ Iterate(d time.Duration, success bool)
+ Put(d time.Duration, size int, success bool)
+ ObjectsCount(d time.Duration, success bool)
+}
+
+type noopMetrics struct{}
+
+func (m *noopMetrics) SetParentID(string) {}
+func (m *noopMetrics) SetMode(bool) {}
+func (m *noopMetrics) Close() {}
+func (m *noopMetrics) Delete(time.Duration, bool, bool) {}
+func (m *noopMetrics) Exists(time.Duration, bool, bool) {}
+func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {}
+func (m *noopMetrics) Get(time.Duration, int, bool, bool) {}
+func (m *noopMetrics) Iterate(time.Duration, bool) {}
+func (m *noopMetrics) Put(time.Duration, int, bool) {}
+func (m *noopMetrics) ObjectsCount(time.Duration, bool) {}
diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go
index e6d0edc03d..80268fa7af 100644
--- a/pkg/local_object_storage/blobstor/mode.go
+++ b/pkg/local_object_storage/blobstor/mode.go
@@ -1,13 +1,14 @@
package blobstor
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
// SetMode sets the blobstor mode of operation.
-func (b *BlobStor) SetMode(m mode.Mode) error {
+func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
b.modeMtx.Lock()
defer b.modeMtx.Unlock()
@@ -19,16 +20,17 @@ func (b *BlobStor) SetMode(m mode.Mode) error {
return nil
}
- err := b.Close()
+ err := b.Close(ctx)
if err == nil {
- if err = b.Open(m.ReadOnly()); err == nil {
- err = b.Init()
+ if err = b.openBlobStor(ctx, m); err == nil {
+ err = b.Init(ctx)
}
}
if err != nil {
- return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
+ return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
}
b.mode = m
+ b.metrics.SetMode(m.ReadOnly())
return nil
}
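
SetMode is comparatively heavy: a mode change closes, reopens and re-initializes the blobstor, and the metrics gauge is updated as part of the call. A hedged usage sketch:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)

// degrade switches the blobstor to read-only; avoid calling it on a hot
// path, since it triggers a close/re-open/init cycle.
func degrade(ctx context.Context, b *blobstor.BlobStor) error {
	return b.SetMode(ctx, mode.ReadOnly)
}
```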
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 96d9026627..64e3c8da12 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -1,80 +1,82 @@
package blobstor
import (
- "encoding/binary"
+ "context"
"fmt"
- "os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
- "go.uber.org/atomic"
- "golang.org/x/exp/rand"
- "golang.org/x/exp/slices"
+ "golang.org/x/sync/errgroup"
)
+type storage struct {
+ desc string
+ create func(string) common.Storage
+}
+
+func (s storage) open(b *testing.B) common.Storage {
+ st := s.create(b.TempDir())
+
+ require.NoError(b, st.Open(mode.ComponentReadWrite))
+ require.NoError(b, st.Init())
+
+ return st
+}
+
-// The storages to benchmark. Each storage has a description and a function which returns the actual
-// storage along with a cleanup function.
+// The storages to benchmark. Each storage has a description and a function which
+// constructs the storage in the given directory.
-var storages = []struct {
- desc string
- create func(*testing.B) (common.Storage, func())
-}{
+var storages = []storage{
{
desc: "memstore",
- create: func(*testing.B) (common.Storage, func()) {
- return memstore.New(), func() {}
+ create: func(string) common.Storage {
+ return memstore.New()
},
},
{
desc: "fstree_nosync",
- create: func(b *testing.B) (common.Storage, func()) {
- dir, err := os.MkdirTemp(os.TempDir(), "fstree_nosync")
- if err != nil {
- b.Fatalf("creating fstree_nosync root path: %v", err)
- }
- cleanup := func() { os.RemoveAll(dir) }
+ create: func(dir string) common.Storage {
return fstree.New(
fstree.WithPath(dir),
fstree.WithDepth(2),
fstree.WithDirNameLen(2),
fstree.WithNoSync(true),
- ), cleanup
+ )
},
},
{
- desc: "fstree",
- create: func(b *testing.B) (common.Storage, func()) {
- dir, err := os.MkdirTemp(os.TempDir(), "fstree")
- if err != nil {
- b.Fatalf("creating fstree root path: %v", err)
- }
- cleanup := func() { os.RemoveAll(dir) }
+ desc: "fstree_without_object_counter",
+ create: func(dir string) common.Storage {
return fstree.New(
fstree.WithPath(dir),
fstree.WithDepth(2),
fstree.WithDirNameLen(2),
- ), cleanup
+ )
+ },
+ },
+ {
+ desc: "fstree_with_object_counter",
+ create: func(dir string) common.Storage {
+ return fstree.New(
+ fstree.WithPath(dir),
+ fstree.WithDepth(2),
+ fstree.WithDirNameLen(2),
+ fstree.WithFileCounter(fstree.NewSimpleCounter()),
+ )
},
},
{
desc: "blobovniczatree",
- create: func(b *testing.B) (common.Storage, func()) {
- dir, err := os.MkdirTemp(os.TempDir(), "blobovniczatree")
- if err != nil {
- b.Fatalf("creating blobovniczatree root path: %v", err)
- }
- cleanup := func() { os.RemoveAll(dir) }
+ create: func(dir string) common.Storage {
return blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
blobovniczatree.WithRootPath(dir),
- ), cleanup
+ )
},
},
}
@@ -83,57 +85,57 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
readTests := []struct {
desc string
size int
- objGen func() objectGenerator
- addrGen func() addressGenerator
+ objGen func() testutil.ObjectGenerator
+ addrGen func() testutil.AddressGenerator
}{
{
desc: "seq100",
size: 10000,
- objGen: func() objectGenerator { return &seqObjGenerator{objSize: 100} },
- addrGen: func() addressGenerator { return &seqAddrGenerator{maxID: 100} },
+ objGen: func() testutil.ObjectGenerator { return &testutil.SeqObjGenerator{ObjSize: 100} },
+ addrGen: func() testutil.AddressGenerator { return &testutil.SeqAddrGenerator{MaxID: 100} },
},
{
desc: "rand100",
size: 10000,
- objGen: func() objectGenerator { return &seqObjGenerator{objSize: 100} },
- addrGen: func() addressGenerator { return randAddrGenerator(10000) },
+ objGen: func() testutil.ObjectGenerator { return &testutil.SeqObjGenerator{ObjSize: 100} },
+ addrGen: func() testutil.AddressGenerator { return testutil.RandAddrGenerator(10000) },
},
}
for _, tt := range readTests {
for _, stEntry := range storages {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
- st, cleanup := stEntry.create(b)
-
- require.NoError(b, st.Open(false))
- require.NoError(b, st.Init())
+ st := stEntry.open(b)
+ defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
- for i := 0; i < tt.size; i++ {
+ var errG errgroup.Group
+ for range tt.size {
obj := objGen.Next()
- addr := addressFromObject(obj)
- raw, err := obj.Marshal()
- require.NoError(b, err)
- if _, err := st.Put(common.PutPrm{
- Address: addr,
- RawData: raw,
- }); err != nil {
- b.Fatalf("writing entry: %v", err)
- }
+ addr := testutil.AddressFromObject(b, obj)
+ errG.Go(func() error {
+ raw, err := obj.Marshal()
+ if err != nil {
+ return fmt.Errorf("marshal: %v", err)
+ }
+ _, err = st.Put(context.Background(), common.PutPrm{
+ Address: addr,
+ RawData: raw,
+ })
+ return err
+ })
}
+ require.NoError(b, errG.Wait())
// Benchmark reading
addrGen := tt.addrGen()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- _, err := st.Get(common.GetPrm{Address: addrGen.Next()})
+ _, err := st.Get(context.Background(), common.GetPrm{Address: addrGen.Next()})
require.NoError(b, err)
}
})
-
- require.NoError(b, st.Close())
- cleanup()
})
}
}
@@ -142,33 +144,33 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
func BenchmarkSubstorageWritePerf(b *testing.B) {
generators := []struct {
desc string
- create func() objectGenerator
+ create func() testutil.ObjectGenerator
}{
- {desc: "rand10", create: func() objectGenerator { return &randObjGenerator{objSize: 10} }},
- {desc: "rand100", create: func() objectGenerator { return &randObjGenerator{objSize: 100} }},
- {desc: "rand1000", create: func() objectGenerator { return &randObjGenerator{objSize: 1000} }},
- {desc: "overwrite10", create: func() objectGenerator { return &overwriteObjGenerator{objSize: 10, maxObjects: 100} }},
- {desc: "overwrite100", create: func() objectGenerator { return &overwriteObjGenerator{objSize: 100, maxObjects: 100} }},
- {desc: "overwrite1000", create: func() objectGenerator { return &overwriteObjGenerator{objSize: 1000, maxObjects: 100} }},
+ {desc: "rand10", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 10} }},
+ {desc: "rand100", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 100} }},
+ {desc: "rand1000", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 1000} }},
+ {desc: "overwrite10", create: func() testutil.ObjectGenerator { return &testutil.OverwriteObjGenerator{ObjSize: 10, MaxObjects: 100} }},
+ {desc: "overwrite100", create: func() testutil.ObjectGenerator { return &testutil.OverwriteObjGenerator{ObjSize: 100, MaxObjects: 100} }},
+ {desc: "overwrite1000", create: func() testutil.ObjectGenerator {
+ return &testutil.OverwriteObjGenerator{ObjSize: 1000, MaxObjects: 100}
+ }},
}
for _, genEntry := range generators {
for _, stEntry := range storages {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
gen := genEntry.create()
- st, cleanup := stEntry.create(b)
-
- require.NoError(b, st.Open(false))
- require.NoError(b, st.Init())
+ st := stEntry.open(b)
+ defer func() { require.NoError(b, st.Close(context.Background())) }()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
obj := gen.Next()
- addr := addressFromObject(obj)
+ addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
- if _, err := st.Put(common.PutPrm{
+ if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {
@@ -176,9 +178,6 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
}
}
})
-
- require.NoError(b, st.Close())
- cleanup()
})
}
}
@@ -188,30 +187,28 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
iterateTests := []struct {
desc string
size int
- objGen func() objectGenerator
+ objGen func() testutil.ObjectGenerator
}{
{
desc: "rand100",
size: 10000,
- objGen: func() objectGenerator { return &randObjGenerator{objSize: 100} },
+ objGen: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 100} },
},
}
for _, tt := range iterateTests {
for _, stEntry := range storages {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
- st, cleanup := stEntry.create(b)
-
- require.NoError(b, st.Open(false))
- require.NoError(b, st.Init())
+ st := stEntry.open(b)
+ defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
- for i := 0; i < tt.size; i++ {
+ for range tt.size {
obj := objGen.Next()
- addr := addressFromObject(obj)
+ addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
- if _, err := st.Put(common.PutPrm{
+ if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {
@@ -222,7 +219,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
// Benchmark iterate
cnt := 0
b.ResetTimer()
- _, err := st.Iterate(common.IteratePrm{
+ _, err := st.Iterate(context.Background(), common.IteratePrm{
Handler: func(elem common.IterationElement) error {
cnt++
return nil
@@ -230,173 +227,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
})
require.NoError(b, err)
require.Equal(b, tt.size, cnt)
- b.StopTimer()
-
- require.NoError(b, st.Close())
- cleanup()
})
}
}
}
-
-func addressFromObject(obj *objectSDK.Object) oid.Address {
- var addr oid.Address
- if id, isSet := obj.ID(); isSet {
- addr.SetObject(id)
- } else {
- panic("object ID is not set")
- }
- if cid, isSet := obj.ContainerID(); isSet {
- addr.SetContainer(cid)
- } else {
- panic("container ID is not set")
- }
- return addr
-}
-
-// addressGenerator is the interface of types that generate object addresses.
-type addressGenerator interface {
- Next() oid.Address
-}
-
-// seqAddrGenerator is an addressGenerator that generates addresses sequentially and wraps around the given max ID.
-type seqAddrGenerator struct {
- cnt atomic.Uint64
- maxID uint64
-}
-
-func (g *seqAddrGenerator) Next() oid.Address {
- var id oid.ID
- binary.LittleEndian.PutUint64(id[:], ((g.cnt.Inc()-1)%g.maxID)+1)
- var addr oid.Address
- addr.SetContainer(cid.ID{})
- addr.SetObject(id)
- return addr
-}
-
-func TestSeqAddrGenerator(t *testing.T) {
- gen := &seqAddrGenerator{maxID: 10}
- for i := 1; i <= 20; i++ {
- addr := gen.Next()
- id := addr.Object()
-
- require.Equal(t, uint64((i-1)%int(gen.maxID)+1), binary.LittleEndian.Uint64(id[:]))
- }
-}
-
-// randAddrGenerator is an addressGenerator that generates random addresses in the given range.
-type randAddrGenerator uint64
-
-func (g randAddrGenerator) Next() oid.Address {
- var id oid.ID
- binary.LittleEndian.PutUint64(id[:], uint64(1+int(rand.Int63n(int64(g)))))
- var addr oid.Address
- addr.SetContainer(cid.ID{})
- addr.SetObject(id)
- return addr
-}
-
-func TestRandAddrGenerator(t *testing.T) {
- gen := randAddrGenerator(5)
- for i := 0; i < 50; i++ {
- addr := gen.Next()
- id := addr.Object()
- k := binary.LittleEndian.Uint64(id[:])
-
- require.True(t, 1 <= k && k <= uint64(gen))
- }
-}
-
-// objectGenerator is the interface of types that generate object entries.
-type objectGenerator interface {
- Next() *objectSDK.Object
-}
-
-// seqObjGenerator is an objectGenerator that generates entries with random payloads of size objSize and sequential IDs.
-type seqObjGenerator struct {
- cnt atomic.Uint64
- objSize uint64
-}
-
-func (g *seqObjGenerator) Next() *objectSDK.Object {
- var id oid.ID
- binary.LittleEndian.PutUint64(id[:], g.cnt.Inc())
- return genObject(id, cid.ID{}, g.objSize)
-}
-
-func TestSeqObjGenerator(t *testing.T) {
- gen := &seqObjGenerator{objSize: 10}
- var addrs []string
- for i := 1; i <= 10; i++ {
- obj := gen.Next()
- id, isSet := obj.ID()
- addrs = append(addrs, addressFromObject(obj).EncodeToString())
-
- require.True(t, isSet)
- require.Equal(t, gen.objSize, uint64(len(obj.Payload())))
- require.Equal(t, uint64(i), binary.LittleEndian.Uint64(id[:]))
- }
- require.True(t, slices.IsSorted(addrs))
-}
-
-// randObjGenerator is an objectGenerator that generates entries with random IDs and payloads of size objSize.
-type randObjGenerator struct {
- objSize uint64
-}
-
-func (g *randObjGenerator) Next() *objectSDK.Object {
- return genObject(oidtest.ID(), cidtest.ID(), g.objSize)
-}
-
-func TestRandObjGenerator(t *testing.T) {
- gen := &randObjGenerator{objSize: 10}
- for i := 0; i < 10; i++ {
- obj := gen.Next()
-
- require.Equal(t, gen.objSize, uint64(len(obj.Payload())))
- }
-}
-
-// overwriteObjGenerator is an objectGenerator that generates entries with random payloads of size objSize and at most maxObjects distinct IDs.
-type overwriteObjGenerator struct {
- objSize uint64
- maxObjects uint64
-}
-
-func (g *overwriteObjGenerator) Next() *objectSDK.Object {
- var id oid.ID
- binary.LittleEndian.PutUint64(id[:], uint64(1+rand.Int63n(int64(g.maxObjects))))
- return genObject(id, cid.ID{}, g.objSize)
-}
-
-func TestOverwriteObjGenerator(t *testing.T) {
- gen := &overwriteObjGenerator{
- objSize: 10,
- maxObjects: 4,
- }
- for i := 0; i < 40; i++ {
- obj := gen.Next()
- id, isSet := obj.ID()
- i := binary.LittleEndian.Uint64(id[:])
-
- require.True(t, isSet)
- require.Equal(t, gen.objSize, uint64(len(obj.Payload())))
- require.True(t, 1 <= i && i <= gen.maxObjects)
- }
-}
-
-// Generates an object with random payload and the given address and size.
-// TODO(#86): there's some testing-related dupes in many places. Probably worth
-// spending some time cleaning up a bit.
-func genObject(id oid.ID, cid cid.ID, sz uint64) *objectSDK.Object {
- raw := objectSDK.New()
-
- raw.SetID(id)
- raw.SetContainerID(cid)
-
- payload := make([]byte, sz)
- rand.Read(payload)
- raw.SetPayload(payload)
-
- return raw
-}
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
index a4009ae43b..fe9c109dd1 100644
--- a/pkg/local_object_storage/blobstor/put.go
+++ b/pkg/local_object_storage/blobstor/put.go
@@ -1,12 +1,16 @@
package blobstor
import (
+ "context"
"fmt"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ErrNoPlaceFound is returned when object can't be saved to any sub-storage component
@@ -21,7 +25,23 @@ var ErrNoPlaceFound = logicerr.New("couldn't find a place to store an object")
//
-// Returns any error encountered that
-// did not allow to completely save the object.
+// Returns any error that prevented the object from being completely saved.
-func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
+func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ size = 0
+ )
+ defer func() {
+ b.metrics.Put(time.Since(startedAt), size, success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
@@ -32,16 +52,18 @@ func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
// marshal object
data, err := prm.Object.Marshal()
if err != nil {
- return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
+ return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
}
prm.RawData = data
}
+ size = len(prm.RawData)
for i := range b.storage {
if b.storage[i].Policy == nil || b.storage[i].Policy(prm.Object, prm.RawData) {
- res, err := b.storage[i].Storage.Put(prm)
+ res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
- logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
+ success = true
+ logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}
return res, err
}
@@ -49,11 +71,3 @@ func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
return common.PutRes{}, ErrNoPlaceFound
}
-
-// NeedsCompression returns true if the object should be compressed.
-// For an object to be compressed 2 conditions must hold:
-// 1. Compression is enabled in settings.
-// 2. Object MIME Content-Type is allowed for compression.
-func (b *BlobStor) NeedsCompression(obj *objectSDK.Object) bool {
- return b.cfg.compression.NeedsCompression(obj)
-}
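Note: Put now opens its own tracing span and records metrics via the deferred closure above, so callers only supply a context and handle errors; ErrNoPlaceFound means every configured sub-storage policy rejected the object. A hedged caller sketch (the example package and putObject helper are assumptions, not code from this patch):

package example

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// putObject stores an object and returns the ID of the sub-storage
// that accepted it.
func putObject(ctx context.Context, b *blobstor.BlobStor, obj *objectSDK.Object, addr oid.Address) ([]byte, error) {
	res, err := b.Put(ctx, common.PutPrm{Address: addr, Object: obj})
	if errors.Is(err, blobstor.ErrNoPlaceFound) {
		// Every sub-storage policy rejected the object.
		return nil, err
	}
	if err != nil {
		return nil, fmt.Errorf("put object to blobstor: %w", err)
	}
	return res.StorageID, nil
}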
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
new file mode 100644
index 0000000000..f288165556
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -0,0 +1,41 @@
+package blobstor
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+type StorageIDUpdate interface {
+ UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
+}
+
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error {
+ var summary common.RebuildRes
+ var rErr error
+ for _, storage := range b.storage {
+ res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
+ MetaStorage: upd,
+ Limiter: concLimiter,
+ FillPercent: fillPercent,
+ })
+ summary.FilesRemoved += res.FilesRemoved
+ summary.ObjectsMoved += res.ObjectsMoved
+ if err != nil {
+ b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages,
+ zap.String("failed_storage_path", storage.Storage.Path()),
+ zap.String("failed_storage_type", storage.Storage.Type()),
+ zap.Error(err))
+ rErr = err
+ break
+ }
+ }
+ b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted,
+ zap.Bool("success", rErr == nil),
+ zap.Uint64("total_files_removed", summary.FilesRemoved),
+ zap.Uint64("total_objects_moved", summary.ObjectsMoved))
+ return rErr
+}
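Note: Rebuild sums FilesRemoved/ObjectsMoved across sub-storages and stops at the first failing one; the StorageIDUpdate hook is how relocated objects get their new storage IDs persisted. A minimal in-memory stand-in, illustrative only (production code would update the metabase instead):

package blobstor

import (
	"context"
	"sync"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// mapStorageIDUpdate records new storage IDs in memory, e.g. for unit tests.
type mapStorageIDUpdate struct {
	mu  sync.Mutex
	ids map[oid.Address][]byte
}

var _ StorageIDUpdate = (*mapStorageIDUpdate)(nil)

func (u *mapStorageIDUpdate) UpdateStorageID(_ context.Context, addr oid.Address, storageID []byte) error {
	u.mu.Lock()
	defer u.mu.Unlock()
	if u.ids == nil {
		u.ids = make(map[oid.Address][]byte)
	}
	u.ids[addr] = storageID
	return nil
}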
diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go
new file mode 100644
index 0000000000..3a38ecf82f
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/teststore/option.go
@@ -0,0 +1,82 @@
+package teststore
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+type cfg struct {
+ st common.Storage
+ overrides struct {
+ Open func(mode mode.ComponentMode) error
+ Init func() error
+ Close func() error
+
+ Type func() string
+ Path func() string
+ SetCompressor func(cc *compression.Compressor)
+ Compressor func() *compression.Compressor
+ SetReportErrorFunc func(f func(context.Context, string, error))
+
+ Get func(common.GetPrm) (common.GetRes, error)
+ GetRange func(common.GetRangePrm) (common.GetRangeRes, error)
+ Exists func(common.ExistsPrm) (common.ExistsRes, error)
+ Put func(common.PutPrm) (common.PutRes, error)
+ Delete func(common.DeletePrm) (common.DeleteRes, error)
+ Iterate func(common.IteratePrm) (common.IterateRes, error)
+ }
+}
+
+type Option func(*cfg)
+
+func WithSubstorage(st common.Storage) Option {
+ return func(c *cfg) {
+ c.st = st
+ }
+}
+
+func WithOpen(f func(mode.ComponentMode) error) Option { return func(c *cfg) { c.overrides.Open = f } }
+func WithInit(f func() error) Option { return func(c *cfg) { c.overrides.Init = f } }
+func WithClose(f func() error) Option { return func(c *cfg) { c.overrides.Close = f } }
+
+func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } }
+func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } }
+
+func WithSetCompressor(f func(*compression.Compressor)) Option {
+ return func(c *cfg) { c.overrides.SetCompressor = f }
+}
+
+func WithCompressor(f func() *compression.Compressor) Option {
+ return func(c *cfg) { c.overrides.Compressor = f }
+}
+
+func WithReportErrorFunc(f func(func(context.Context, string, error))) Option {
+ return func(c *cfg) { c.overrides.SetReportErrorFunc = f }
+}
+
+func WithGet(f func(common.GetPrm) (common.GetRes, error)) Option {
+ return func(c *cfg) { c.overrides.Get = f }
+}
+
+func WithGetRange(f func(common.GetRangePrm) (common.GetRangeRes, error)) Option {
+ return func(c *cfg) { c.overrides.GetRange = f }
+}
+
+func WithExists(f func(common.ExistsPrm) (common.ExistsRes, error)) Option {
+ return func(c *cfg) { c.overrides.Exists = f }
+}
+
+func WithPut(f func(common.PutPrm) (common.PutRes, error)) Option {
+ return func(c *cfg) { c.overrides.Put = f }
+}
+
+func WithDelete(f func(common.DeletePrm) (common.DeleteRes, error)) Option {
+ return func(c *cfg) { c.overrides.Delete = f }
+}
+
+func WithIterate(f func(common.IteratePrm) (common.IterateRes, error)) Option {
+ return func(c *cfg) { c.overrides.Iterate = f }
+}
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
new file mode 100644
index 0000000000..190b6a8760
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/teststore/teststore.go
@@ -0,0 +1,243 @@
+// Package teststore provides a common.Storage implementation for testing/mocking purposes.
+//
+// A new teststore.TestStore can be obtained with teststore.New. Whenever one of the common.Storage
+// methods is called, the implementation selects what function to call in the following order:
+// 1. If an override for that method was provided at construction time (via teststore.WithXXX()) or
+// afterwards via SetOption, that override is used.
+// 2. If a substorage was provided at construction time (via teststore.WithSubstorage()) or afterwards
+// via SetOption, the corresponding method in the substorage is used.
+// 3. If none of the above apply, the call panics with an error describing the unexpected call.
+//
+// It's safe to call SetOption and the overrides from multiple goroutines, but it's the override's
+// responsibility to ensure safety of whatever operation it executes.
+package teststore
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+// TestStore is a common.Storage implementation for testing/mocking purposes.
+type TestStore struct {
+ mu sync.RWMutex
+ *cfg
+}
+
+// ErrDiskExploded is a phony error which can be used for testing purposes to differentiate it from
+// more common errors.
+var ErrDiskExploded = errors.New("disk exploded")
+
+// New returns a teststore.TestStore from the given options.
+func New(opts ...Option) *TestStore {
+ c := &cfg{}
+ for _, opt := range opts {
+ opt(c)
+ }
+ return &TestStore{cfg: c}
+}
+
+// SetOption overrides an option of an existing teststore.TestStore.
+// This is useful for overriding methods during a test so that different
+// behaviors are simulated.
+func (s *TestStore) SetOption(opt Option) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ opt(s.cfg)
+}
+
+func (s *TestStore) Open(mod mode.ComponentMode) error {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Open != nil:
+ return s.overrides.Open(mod)
+ case s.st != nil:
+ return s.st.Open(mod)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: Open(%v)", mod.String()))
+ }
+}
+
+func (s *TestStore) Init() error {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Init != nil:
+ return s.overrides.Init()
+ case s.st != nil:
+ return s.st.Init()
+ default:
+ panic("unexpected storage call: Init()")
+ }
+}
+
+func (s *TestStore) Close(ctx context.Context) error {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Close != nil:
+ return s.overrides.Close()
+ case s.st != nil:
+ return s.st.Close(ctx)
+ default:
+ panic("unexpected storage call: Close()")
+ }
+}
+
+func (s *TestStore) Type() string {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Type != nil:
+ return s.overrides.Type()
+ case s.st != nil:
+ return s.st.Type()
+ default:
+ panic("unexpected storage call: Type()")
+ }
+}
+
+func (s *TestStore) Path() string {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Path != nil:
+ return s.overrides.Path()
+ case s.st != nil:
+ return s.st.Path()
+ default:
+ panic("unexpected storage call: Path()")
+ }
+}
+
+func (s *TestStore) SetCompressor(cc *compression.Compressor) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.SetCompressor != nil:
+ s.overrides.SetCompressor(cc)
+ case s.st != nil:
+ s.st.SetCompressor(cc)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: SetCompressor(%+v)", cc))
+ }
+}
+
+func (s *TestStore) Compressor() *compression.Compressor {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Compressor != nil:
+ return s.overrides.Compressor()
+ case s.st != nil:
+ return s.st.Compressor()
+ default:
+ panic("unexpected storage call: Compressor()")
+ }
+}
+
+func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.SetReportErrorFunc != nil:
+ s.overrides.SetReportErrorFunc(f)
+ case s.st != nil:
+ s.st.SetReportErrorFunc(f)
+ default:
+ panic("unexpected storage call: SetReportErrorFunc()")
+ }
+}
+
+func (s *TestStore) Get(ctx context.Context, req common.GetPrm) (common.GetRes, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	switch {
+ case s.overrides.Get != nil:
+ return s.overrides.Get(req)
+ case s.st != nil:
+ return s.st.Get(ctx, req)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: Get(%+v)", req))
+ }
+}
+
+func (s *TestStore) GetRange(ctx context.Context, req common.GetRangePrm) (common.GetRangeRes, error) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.GetRange != nil:
+ return s.overrides.GetRange(req)
+ case s.st != nil:
+ return s.st.GetRange(ctx, req)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: GetRange(%+v)", req))
+ }
+}
+
+func (s *TestStore) Exists(ctx context.Context, req common.ExistsPrm) (common.ExistsRes, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	switch {
+ case s.overrides.Exists != nil:
+ return s.overrides.Exists(req)
+ case s.st != nil:
+ return s.st.Exists(ctx, req)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: Exists(%+v)", req))
+ }
+}
+
+func (s *TestStore) Put(ctx context.Context, req common.PutPrm) (common.PutRes, error) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Put != nil:
+ return s.overrides.Put(req)
+ case s.st != nil:
+ return s.st.Put(ctx, req)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: Put(%+v)", req))
+ }
+}
+
+func (s *TestStore) Delete(ctx context.Context, req common.DeletePrm) (common.DeleteRes, error) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Delete != nil:
+ return s.overrides.Delete(req)
+ case s.st != nil:
+ return s.st.Delete(ctx, req)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: Delete(%+v)", req))
+ }
+}
+
+func (s *TestStore) Iterate(ctx context.Context, req common.IteratePrm) (common.IterateRes, error) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ switch {
+ case s.overrides.Iterate != nil:
+ return s.overrides.Iterate(req)
+ case s.st != nil:
+ return s.st.Iterate(ctx, req)
+ default:
+ panic(fmt.Sprintf("unexpected storage call: Iterate(%+v)", req))
+ }
+}
+
+func (s *TestStore) SetParentID(string) {}
+
+func (s *TestStore) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) {
+ return common.RebuildRes{}, nil
+}
+
+func (s *TestStore) ObjectsCount(ctx context.Context) (uint64, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	if s.st == nil {
+		panic("unexpected storage call: ObjectsCount()")
+	}
+	return s.st.ObjectsCount(ctx)
+}
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index 0348371107..e0617a8326 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -1,6 +1,8 @@
package engine
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
@@ -42,22 +44,25 @@ func (r ListContainersRes) Containers() []cid.ID {
// ContainerSize returns the sum of estimation container sizes among all shards.
//
-// Returns an error if executions are blocked (see BlockExecution).
+// Returns an error if the engine is closed.
-func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
+func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) {
+ defer elapsed("ContainerSize", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.containerSize(prm)
- return err
+ var csErr error
+ res, csErr = e.containerSize(ctx, prm)
+ return csErr
})
return
}
// ContainerSize calls ContainerSize method on engine to calculate sum of estimation container sizes among all shards.
-func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
+func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) {
var prm ContainerSizePrm
prm.SetContainerID(id)
- res, err := e.ContainerSize(prm)
+ res, err := e.ContainerSize(ctx, prm)
if err != nil {
return 0, err
}
@@ -65,18 +70,15 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
return res.Size(), nil
}
-func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddEstimateContainerSizeDuration)()
- }
-
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
+ var res ContainerSizeRes
+ err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
var csPrm shard.ContainerSizePrm
csPrm.SetContainerID(prm.cnr)
- csRes, err := sh.Shard.ContainerSize(csPrm)
+ csRes, err := sh.ContainerSize(ctx, csPrm)
if err != nil {
- e.reportShardError(sh, "can't get container size", err,
+ e.reportShardError(ctx, sh, "can't get container size", err,
zap.Stringer("container_id", prm.cnr))
return false
}
@@ -86,26 +88,29 @@ func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRe
return false
})
- return
+ return res, err
}
// ListContainers returns a unique container IDs presented in the engine objects.
//
-// Returns an error if executions are blocked (see BlockExecution).
+// Returns an error if the engine is closed.
-func (e *StorageEngine) ListContainers(_ ListContainersPrm) (res ListContainersRes, err error) {
+func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) (res ListContainersRes, err error) {
+ defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.listContainers()
- return err
+ var lcErr error
+ res, lcErr = e.listContainers(ctx)
+ return lcErr
})
return
}
// ListContainers calls ListContainers method on engine to get a unique container IDs presented in the engine objects.
-func ListContainers(e *StorageEngine) ([]cid.ID, error) {
+func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
var prm ListContainersPrm
- res, err := e.ListContainers(prm)
+ res, err := e.ListContainers(ctx, prm)
if err != nil {
return nil, err
}
@@ -113,17 +118,13 @@ func ListContainers(e *StorageEngine) ([]cid.ID, error) {
return res.Containers(), nil
}
-func (e *StorageEngine) listContainers() (ListContainersRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddListContainersDuration)()
- }
-
+func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
uniqueIDs := make(map[string]cid.ID)
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- res, err := sh.Shard.ListContainers(shard.ListContainersPrm{})
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
if err != nil {
- e.reportShardError(sh, "can't get list of containers", err)
+ e.reportShardError(ctx, sh, "can't get list of containers", err)
return false
}
@@ -135,7 +136,9 @@ func (e *StorageEngine) listContainers() (ListContainersRes, error) {
}
return false
- })
+ }); err != nil {
+ return ListContainersRes{}, err
+ }
result := make([]cid.ID, 0, len(uniqueIDs))
for _, v := range uniqueIDs {
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index c5e51a7f02..39e532b6b2 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -1,15 +1,20 @@
package engine
import (
+ "context"
"errors"
"fmt"
"path/filepath"
"strings"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
)
type shardInitError struct {
@@ -18,22 +23,18 @@ type shardInitError struct {
}
// Open opens all StorageEngine's components.
-func (e *StorageEngine) Open() error {
- return e.open()
-}
-
-func (e *StorageEngine) open() error {
+func (e *StorageEngine) Open(ctx context.Context) error {
e.mtx.Lock()
defer e.mtx.Unlock()
var wg sync.WaitGroup
- var errCh = make(chan shardInitError, len(e.shards))
+ errCh := make(chan shardInitError, len(e.shards))
for id, sh := range e.shards {
wg.Add(1)
go func(id string, sh *shard.Shard) {
defer wg.Done()
- if err := sh.Open(); err != nil {
+ if err := sh.Open(ctx); err != nil {
errCh <- shardInitError{
err: err,
id: id,
@@ -46,16 +47,16 @@ func (e *StorageEngine) open() error {
for res := range errCh {
if res.err != nil {
- e.log.Error("could not open shard, closing and skipping",
+ e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close()
+ err := sh.Close(ctx)
if err != nil {
- e.log.Error("could not close partially initialized shard",
+ e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
@@ -68,48 +69,53 @@ func (e *StorageEngine) open() error {
}
// Init initializes all StorageEngine's components.
-func (e *StorageEngine) Init() error {
+func (e *StorageEngine) Init(ctx context.Context) error {
e.mtx.Lock()
defer e.mtx.Unlock()
- var wg sync.WaitGroup
- var errCh = make(chan shardInitError, len(e.shards))
+ errCh := make(chan shardInitError, len(e.shards))
+ var eg errgroup.Group
+ if e.lowMem && e.anyShardRequiresRefill() {
+ eg.SetLimit(1)
+ }
for id, sh := range e.shards {
- wg.Add(1)
- go func(id string, sh *shard.Shard) {
- defer wg.Done()
- if err := sh.Init(); err != nil {
+ eg.Go(func() error {
+ if err := sh.Init(ctx); err != nil {
errCh <- shardInitError{
err: err,
id: id,
}
}
- }(id, sh.Shard)
+ return nil
+ })
}
- wg.Wait()
+ err := eg.Wait()
close(errCh)
+ if err != nil {
+ return fmt.Errorf("initialize shards: %w", err)
+ }
for res := range errCh {
if res.err != nil {
if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
- e.log.Error("could not initialize shard, closing and skipping",
+ e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close()
+ err := sh.Close(ctx)
if err != nil {
- e.log.Error("could not close partially initialized shard",
+ e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
continue
}
- return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
+ return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
}
}
@@ -118,39 +124,42 @@ func (e *StorageEngine) Init() error {
}
e.wg.Add(1)
- go e.setModeLoop()
+ go e.setModeLoop(ctx)
return nil
}
+func (e *StorageEngine) anyShardRequiresRefill() bool {
+ for _, sh := range e.shards {
+ if sh.NeedRefillMetabase() {
+ return true
+ }
+ }
+ return false
+}
+
var errClosed = errors.New("storage engine is closed")
// Close releases all StorageEngine's components. Waits for all data-related operations to complete.
// After the call, all the next ones will fail.
//
// The method MUST only be called when the application exits.
-func (e *StorageEngine) Close() error {
+func (e *StorageEngine) Close(ctx context.Context) error {
close(e.closeCh)
defer e.wg.Wait()
- return e.setBlockExecErr(errClosed)
+ return e.closeEngine(ctx)
}
// closes all shards. Never returns an error, shard errors are logged.
-func (e *StorageEngine) close(releasePools bool) error {
+func (e *StorageEngine) closeAllShards(ctx context.Context) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
- if releasePools {
- for _, p := range e.shardPools {
- p.Release()
- }
- }
-
for id, sh := range e.shards {
- if err := sh.Close(); err != nil {
- e.log.Debug("could not close shard",
+ if err := sh.Close(ctx); err != nil {
+ e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
zap.String("id", id),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
}
@@ -165,90 +174,29 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error {
e.blockExec.mtx.RLock()
defer e.blockExec.mtx.RUnlock()
- if e.blockExec.err != nil {
- return e.blockExec.err
+ if e.blockExec.closed {
+ return errClosed
}
return op()
}
-// sets the flag of blocking execution of all data operations according to err:
-// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method
-// (if err == errClosed => additionally releases pools and does not allow to resume executions).
-// - otherwise, resumes execution. If exec was blocked, calls open method.
-//
-// Can be called concurrently with exec. In this case it waits for all executions to complete.
-func (e *StorageEngine) setBlockExecErr(err error) error {
+func (e *StorageEngine) closeEngine(ctx context.Context) error {
e.blockExec.mtx.Lock()
defer e.blockExec.mtx.Unlock()
- prevErr := e.blockExec.err
-
- wasClosed := errors.Is(prevErr, errClosed)
- if wasClosed {
+ if e.blockExec.closed {
return errClosed
}
- e.blockExec.err = err
-
- if err == nil {
- if prevErr != nil { // block -> ok
- return e.open()
- }
- } else if prevErr == nil { // ok -> block
- return e.close(errors.Is(err, errClosed))
- }
-
- // otherwise do nothing
-
- return nil
-}
-
-// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err.
-// To resume the execution, use ResumeExecution method.
-//
-// Сan be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources
-// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions
-// to complete). Returns error if any Close has been called before.
-//
-// Must not be called concurrently with either Open or Init.
-//
-// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution
-// for this.
-func (e *StorageEngine) BlockExecution(err error) error {
- return e.setBlockExecErr(err)
-}
-
-// ResumeExecution resumes the execution of any data-related operation.
-// To block the execution, use BlockExecution method.
-//
-// Сan be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources
-// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions
-// to complete). Returns error if any Close has been called before.
-//
-// Must not be called concurrently with either Open or Init.
-func (e *StorageEngine) ResumeExecution() error {
- return e.setBlockExecErr(nil)
+ e.blockExec.closed = true
+ return e.closeAllShards(ctx)
}
type ReConfiguration struct {
- errorsThreshold uint32
- shardPoolSize uint32
-
shards map[string][]shard.Option // meta path -> shard opts
}
-// SetErrorsThreshold sets a size amount of errors after which
-// shard is moved to read-only mode.
-func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
- rCfg.errorsThreshold = errorsThreshold
-}
-
-// SetShardPoolSize sets a size of worker pool for each shard.
-func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
- rCfg.shardPoolSize = shardPoolSize
-}
-
// AddShard adds a shard for the reconfiguration.
// Shard identifier is calculated from paths used in blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
@@ -264,7 +212,8 @@ func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
}
// Reload reloads StorageEngine's configuration in runtime.
-func (e *StorageEngine) Reload(rcfg ReConfiguration) error {
+func (e *StorageEngine) Reload(ctx context.Context, rcfg ReConfiguration) error {
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagCritical.String())
type reloadInfo struct {
sh *shard.Shard
opts []shard.Option
@@ -303,41 +252,41 @@ loop:
e.mtx.RUnlock()
- e.removeShards(shardsToRemove...)
+ e.removeShards(ctx, shardsToRemove...)
for _, p := range shardsToReload {
- err := p.sh.Reload(p.opts...)
+ err := p.sh.Reload(ctx, p.opts...)
if err != nil {
- e.log.Error("could not reload a shard",
+ e.log.Error(ctx, logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),
zap.Error(err))
}
}
for _, newID := range shardsToAdd {
- sh, err := e.createShard(rcfg.shards[newID])
+ sh, err := e.createShard(ctx, rcfg.shards[newID])
if err != nil {
- return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
+ return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
}
idStr := sh.ID().String()
- err = sh.Open()
+ err = sh.Open(ctx)
if err == nil {
- err = sh.Init()
+ err = sh.Init(ctx)
}
if err != nil {
- _ = sh.Close()
- return fmt.Errorf("could not init %s shard: %w", idStr, err)
+ _ = sh.Close(ctx)
+ return fmt.Errorf("init %s shard: %w", idStr, err)
}
err = e.addShard(sh)
if err != nil {
- _ = sh.Close()
- return fmt.Errorf("could not add %s shard: %w", idStr, err)
+ _ = sh.Close(ctx)
+ return fmt.Errorf("add %s shard: %w", idStr, err)
}
- e.log.Info("added new shard", zap.String("id", idStr))
+ e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
}
return nil
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 65a8d4348b..4ff0ed5ec6 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -1,218 +1,189 @@
package engine
import (
- "errors"
+ "context"
"fmt"
+ "io/fs"
"os"
"path/filepath"
"strconv"
+ "sync/atomic"
"testing"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
- "go.uber.org/zap/zaptest"
)
// TestInitializationFailure checks that shard is initialized and closed even if media
-// under any single component is absent. We emulate this with permission denied error.
+// under any single component is absent.
func TestInitializationFailure(t *testing.T) {
- type paths struct {
- blobstor string
- metabase string
- writecache string
- pilorama string
+ type openFileFunc func(string, int, fs.FileMode) (*os.File, error)
+
+ type testShardOpts struct {
+ openFileMetabase openFileFunc
+ openFilePilorama openFileFunc
}
- existsDir := filepath.Join(t.TempDir(), "shard")
- badDir := filepath.Join(t.TempDir(), "missing")
-
- testShard := func(c paths) []shard.Option {
+ testShard := func(opts testShardOpts) ([]shard.Option, *teststore.TestStore, *teststore.TestStore) {
sid, err := generateShardID()
require.NoError(t, err)
+ storages, smallFileStorage, largeFileStorage := newTestStorages(t.TempDir(), 1<<20)
+
+ wcOpts := []writecache.Option{
+ writecache.WithPath(t.TempDir()),
+ }
+
return []shard.Option{
shard.WithID(sid),
- shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ shard.WithLogger(test.NewLogger(t)),
shard.WithBlobStorOptions(
- blobstor.WithStorages(
- newStorages(c.blobstor, 1<<20))),
+ blobstor.WithStorages(storages)),
shard.WithMetaBaseOptions(
meta.WithBoltDBOptions(&bbolt.Options{
- Timeout: 100 * time.Millisecond,
+ Timeout: 100 * time.Millisecond,
+ OpenFile: opts.openFileMetabase,
}),
- meta.WithPath(c.metabase),
- meta.WithPermissions(0700),
+ meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+ meta.WithPermissions(0o700),
meta.WithEpochState(epochState{})),
shard.WithWriteCache(true),
- shard.WithWriteCacheOptions(writecache.WithPath(c.writecache)),
- shard.WithPiloramaOptions(pilorama.WithPath(c.pilorama)),
- }
+ shard.WithWriteCacheOptions(wcOpts),
+ shard.WithPiloramaOptions(
+ pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")),
+ pilorama.WithOpenFile(opts.openFilePilorama),
+ ),
+ }, smallFileStorage, largeFileStorage
}
t.Run("blobstor", func(t *testing.T) {
- badDir := filepath.Join(badDir, t.Name())
- require.NoError(t, os.MkdirAll(badDir, os.ModePerm))
- require.NoError(t, os.Chmod(badDir, 0))
- testEngineFailInitAndReload(t, badDir, false, testShard(paths{
- blobstor: filepath.Join(badDir, "0"),
- metabase: filepath.Join(existsDir, t.Name(), "1"),
- writecache: filepath.Join(existsDir, t.Name(), "2"),
- pilorama: filepath.Join(existsDir, t.Name(), "3"),
+ shardOpts, _, largeFileStorage := testShard(testShardOpts{
+ openFileMetabase: os.OpenFile,
+ openFilePilorama: os.OpenFile,
+ })
+ largeFileStorage.SetOption(teststore.WithOpen(func(primitiveMode mode.ComponentMode) error {
+ return teststore.ErrDiskExploded
}))
+ beforeReload := func() {
+ largeFileStorage.SetOption(teststore.WithOpen(nil))
+ }
+ testEngineFailInitAndReload(t, false, shardOpts, beforeReload)
})
t.Run("metabase", func(t *testing.T) {
- badDir := filepath.Join(badDir, t.Name())
- require.NoError(t, os.MkdirAll(badDir, os.ModePerm))
- require.NoError(t, os.Chmod(badDir, 0))
- testEngineFailInitAndReload(t, badDir, true, testShard(paths{
- blobstor: filepath.Join(existsDir, t.Name(), "0"),
- metabase: filepath.Join(badDir, "1"),
- writecache: filepath.Join(existsDir, t.Name(), "2"),
- pilorama: filepath.Join(existsDir, t.Name(), "3"),
- }))
- })
- t.Run("write-cache", func(t *testing.T) {
- badDir := filepath.Join(badDir, t.Name())
- require.NoError(t, os.MkdirAll(badDir, os.ModePerm))
- require.NoError(t, os.Chmod(badDir, 0))
- testEngineFailInitAndReload(t, badDir, false, testShard(paths{
- blobstor: filepath.Join(existsDir, t.Name(), "0"),
- metabase: filepath.Join(existsDir, t.Name(), "1"),
- writecache: filepath.Join(badDir, "2"),
- pilorama: filepath.Join(existsDir, t.Name(), "3"),
- }))
+ var openFileMetabaseSucceed atomic.Bool
+ openFileMetabase := func(p string, f int, mode fs.FileMode) (*os.File, error) {
+ if openFileMetabaseSucceed.Load() {
+ return os.OpenFile(p, f, mode)
+ }
+ return nil, teststore.ErrDiskExploded
+ }
+ beforeReload := func() {
+ openFileMetabaseSucceed.Store(true)
+ }
+ shardOpts, _, _ := testShard(testShardOpts{
+ openFileMetabase: openFileMetabase,
+ openFilePilorama: os.OpenFile,
+ })
+ testEngineFailInitAndReload(t, true, shardOpts, beforeReload)
})
t.Run("pilorama", func(t *testing.T) {
- badDir := filepath.Join(badDir, t.Name())
- require.NoError(t, os.MkdirAll(badDir, os.ModePerm))
- require.NoError(t, os.Chmod(badDir, 0))
- testEngineFailInitAndReload(t, badDir, false, testShard(paths{
- blobstor: filepath.Join(existsDir, t.Name(), "0"),
- metabase: filepath.Join(existsDir, t.Name(), "1"),
- writecache: filepath.Join(existsDir, t.Name(), "2"),
- pilorama: filepath.Join(badDir, "3"),
- }))
+ var openFilePiloramaSucceed atomic.Bool
+ openFilePilorama := func(p string, f int, mode fs.FileMode) (*os.File, error) {
+ if openFilePiloramaSucceed.Load() {
+ return os.OpenFile(p, f, mode)
+ }
+ return nil, teststore.ErrDiskExploded
+ }
+ beforeReload := func() {
+ openFilePiloramaSucceed.Store(true)
+ }
+ shardOpts, _, _ := testShard(testShardOpts{
+ openFileMetabase: os.OpenFile,
+ openFilePilorama: openFilePilorama,
+ })
+ testEngineFailInitAndReload(t, false, shardOpts, beforeReload)
})
}
-func testEngineFailInitAndReload(t *testing.T, badDir string, errOnAdd bool, s []shard.Option) {
+func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.Option, beforeReload func()) {
var configID string
e := New()
- _, err := e.AddShard(s...)
- if errOnAdd {
- require.Error(t, err)
- // This branch is only taken when we cannot update shard ID in the metabase.
- // The id cannot be encountered during normal operation, but it is ok for tests:
- // it is only compared for equality with other ids and we have 0 shards here.
- configID = "id"
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+ _, err := e.AddShard(context.Background(), opts...)
+ require.NoError(t, err)
+
+ e.mtx.RLock()
+ var id string
+ for id = range e.shards {
+ break
+ }
+ configID = calculateShardID(e.shards[id].Shard.DumpInfo())
+ e.mtx.RUnlock()
+
+ err = e.Open(context.Background())
+ require.NoError(t, err)
+ if degradedMode {
+ require.NoError(t, e.Init(context.Background()))
+ require.Equal(t, mode.DegradedReadOnly, e.DumpInfo().Shards[0].Mode)
+ return
} else {
- require.NoError(t, err)
+ require.Error(t, e.Init(context.Background()))
e.mtx.RLock()
- var id string
- for id = range e.shards {
- break
- }
- configID = calculateShardID(e.shards[id].Shard.DumpInfo())
+ shardCount := len(e.shards)
e.mtx.RUnlock()
-
- err = e.Open()
- if err == nil {
- require.Error(t, e.Init())
- }
+ require.Equal(t, 0, shardCount)
}
+ beforeReload()
+
+ require.NoError(t, e.Reload(context.Background(), ReConfiguration{
+ shards: map[string][]shard.Option{configID: opts},
+ }))
+
e.mtx.RLock()
shardCount := len(e.shards)
e.mtx.RUnlock()
- require.Equal(t, 0, shardCount)
-
- require.NoError(t, os.Chmod(badDir, os.ModePerm))
- require.NoError(t, e.Reload(ReConfiguration{
- shards: map[string][]shard.Option{configID: s},
- }))
-
- e.mtx.RLock()
- shardCount = len(e.shards)
- e.mtx.RUnlock()
require.Equal(t, 1, shardCount)
}
-func TestExecBlocks(t *testing.T) {
- e := testNewEngineWithShardNum(t, 2) // number doesn't matter in this test, 2 is several but not many
- t.Cleanup(func() {
- os.RemoveAll(t.Name())
- })
-
- // put some object
- obj := generateObjectWithCID(t, cidtest.ID())
-
- addr := object.AddressOf(obj)
-
- require.NoError(t, Put(e, obj))
-
- // block executions
- errBlock := errors.New("block exec err")
-
- require.NoError(t, e.BlockExecution(errBlock))
-
- // try to exec some op
- _, err := Head(e, addr)
- require.ErrorIs(t, err, errBlock)
-
- // resume executions
- require.NoError(t, e.ResumeExecution())
-
- _, err = Head(e, addr) // can be any data-related op
- require.NoError(t, err)
-
- // close
- require.NoError(t, e.Close())
-
- // try exec after close
- _, err = Head(e, addr)
- require.Error(t, err)
-
- // try to resume
- require.Error(t, e.ResumeExecution())
-}
-
func TestPersistentShardID(t *testing.T) {
- dir, err := os.MkdirTemp("", "*")
- require.NoError(t, err)
+ dir := t.TempDir()
- e, _, id := newEngineWithErrorThreshold(t, dir, 1)
+ te := newEngineWithErrorThreshold(t, dir, 1)
- checkShardState(t, e, id[0], 0, mode.ReadWrite)
- require.NoError(t, e.Close())
+ checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+ require.NoError(t, te.ng.Close(context.Background()))
- e, _, newID := newEngineWithErrorThreshold(t, dir, 1)
- require.Equal(t, id, newID)
- require.NoError(t, e.Close())
+ newTe := newEngineWithErrorThreshold(t, dir, 1)
+ for i := range len(newTe.shards) {
+ require.Equal(t, te.shards[i].id, newTe.shards[i].id)
+ }
+ require.NoError(t, newTe.ng.Close(context.Background()))
- p1 := e.shards[id[0].String()].Shard.DumpInfo().MetaBaseInfo.Path
- p2 := e.shards[id[1].String()].Shard.DumpInfo().MetaBaseInfo.Path
+ p1 := newTe.ng.shards[te.shards[0].id.String()].Shard.DumpInfo().MetaBaseInfo.Path
+ p2 := newTe.ng.shards[te.shards[1].id.String()].Shard.DumpInfo().MetaBaseInfo.Path
tmp := filepath.Join(dir, "tmp")
require.NoError(t, os.Rename(p1, tmp))
require.NoError(t, os.Rename(p2, p1))
require.NoError(t, os.Rename(tmp, p2))
- e, _, newID = newEngineWithErrorThreshold(t, dir, 1)
- require.Equal(t, id[1], newID[0])
- require.Equal(t, id[0], newID[1])
- require.NoError(t, e.Close())
-
+ newTe = newEngineWithErrorThreshold(t, dir, 1)
+ require.Equal(t, te.shards[1].id, newTe.shards[0].id)
+ require.Equal(t, te.shards[0].id, newTe.shards[1].id)
+ require.NoError(t, newTe.ng.Close(context.Background()))
}
func TestReload(t *testing.T) {
@@ -230,11 +201,10 @@ func TestReload(t *testing.T) {
}
rcfg.AddShard(currShards[0], nil) // same path
- require.NoError(t, e.Reload(rcfg))
+ require.NoError(t, e.Reload(context.Background(), rcfg))
// no new paths => no new shards
require.Equal(t, shardNum, len(e.shards))
- require.Equal(t, shardNum, len(e.shardPools))
newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum))
@@ -243,10 +213,11 @@ func TestReload(t *testing.T) {
meta.WithPath(newMeta),
meta.WithEpochState(epochState{}),
)})
- require.NoError(t, e.Reload(rcfg))
+ require.NoError(t, e.Reload(context.Background(), rcfg))
require.Equal(t, shardNum+1, len(e.shards))
- require.Equal(t, shardNum+1, len(e.shardPools))
+
+ require.NoError(t, e.Close(context.Background()))
})
t.Run("remove shards", func(t *testing.T) {
@@ -256,47 +227,47 @@ func TestReload(t *testing.T) {
e, currShards := engineWithShards(t, removePath, shardNum)
var rcfg ReConfiguration
- for i := 0; i < len(currShards)-1; i++ { // without one of the shards
+ for i := range len(currShards) - 1 { // without one of the shards
rcfg.AddShard(currShards[i], nil)
}
- require.NoError(t, e.Reload(rcfg))
+ require.NoError(t, e.Reload(context.Background(), rcfg))
// removed one
require.Equal(t, shardNum-1, len(e.shards))
- require.Equal(t, shardNum-1, len(e.shardPools))
+
+ require.NoError(t, e.Close(context.Background()))
})
}
// engineWithShards creates engine with specified number of shards. Returns
// slice of paths to their metabase and the engine.
-// TODO: #1776 unify engine construction in tests
func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []string) {
addPath := filepath.Join(path, "add")
currShards := make([]string, 0, num)
- e := New()
- for i := 0; i < num; i++ {
- id, err := e.AddShard(
- shard.WithBlobStorOptions(
- blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(i)), errSmallSize))),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ),
- )
- require.NoError(t, err)
+ te := testNewEngine(t).
+ setShardsNumOpts(t, num, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t)),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages(newStorages(t, filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", id))),
+ meta.WithPermissions(0o700),
+ meta.WithEpochState(epochState{}),
+ ),
+ }
+ }).
+ prepare(t)
+ e, ids := te.engine, te.shardIDs
+ for _, id := range ids {
currShards = append(currShards, calculateShardID(e.shards[id.String()].DumpInfo()))
}
require.Equal(t, num, len(e.shards))
- require.Equal(t, num, len(e.shardPools))
-
- require.NoError(t, e.Open())
- require.NoError(t, e.Init())
return e, currShards
}
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index a49b1e8fa7..223cdbc488 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -1,12 +1,18 @@
package engine
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -17,9 +23,6 @@ type DeletePrm struct {
forceRemoval bool
}
-// DeleteRes groups the resulting values of Delete operation.
-type DeleteRes struct{}
-
// WithAddress is a Delete option to set the address of the object to delete.
//
// Option is required.
@@ -44,47 +47,53 @@ func (p *DeletePrm) WithForceRemoval() {
// NOTE: Marks any object to be deleted (despite any prohibitions
// on operations with that object) if WithForceRemoval option has
// been provided.
-func (e *StorageEngine) Delete(prm DeletePrm) (res DeleteRes, err error) {
- err = e.execIfNotBlocked(func() error {
- res, err = e.delete(prm)
- return err
- })
+func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("force_removal", prm.forceRemoval),
+ ))
+ defer span.End()
+ defer elapsed("Delete", e.metrics.AddMethodDuration)()
- return
+ return e.execIfNotBlocked(func() error {
+ return e.delete(ctx, prm)
+ })
}
-func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddDeleteDuration)()
- }
-
+func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
var locked struct {
- is bool
- err apistatus.ObjectLocked
+ is bool
}
var splitInfo *objectSDK.SplitInfo
+ var ecInfo *objectSDK.ECInfo
// Removal of a big object is done in multiple stages:
// 1. Remove the parent object. If it is locked or already removed, return immediately.
// 2. Otherwise, search for all objects with a particular SplitID and delete them too.
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
+ if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
var existsPrm shard.ExistsPrm
- existsPrm.SetAddress(prm.addr)
+ existsPrm.Address = prm.addr
- resExists, err := sh.Exists(existsPrm)
+ resExists, err := sh.Exists(ctx, existsPrm)
if err != nil {
- if shard.IsErrRemoved(err) || shard.IsErrObjectExpired(err) {
+ if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) {
return true
}
var splitErr *objectSDK.SplitInfoError
- if !errors.As(err, &splitErr) {
- if !shard.IsErrNotFound(err) {
- e.reportShardError(sh, "could not check object existence", err)
+ var ecErr *objectSDK.ECInfoError
+ if errors.As(err, &splitErr) {
+ splitInfo = splitErr.SplitInfo()
+ } else if errors.As(err, &ecErr) {
+ ecInfo = ecErr.ECInfo()
+ e.deleteChunks(ctx, sh, ecInfo, prm)
+ return false
+ } else {
+ if !client.IsErrObjectNotFound(err) {
+ e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
}
return false
}
- splitInfo = splitErr.SplitInfo()
} else if !resExists.Exists() {
return false
}
@@ -95,63 +104,93 @@ func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
shPrm.ForceRemoval()
}
- _, err = sh.Inhume(shPrm)
+ _, err = sh.Inhume(ctx, shPrm)
if err != nil {
- e.reportShardError(sh, "could not inhume object in shard", err)
+ e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
- locked.is = errors.As(err, &locked.err)
+ var target *apistatus.ObjectLocked
+ locked.is = errors.As(err, &target)
return locked.is
}
// If a parent object is removed we should set GC mark on each shard.
return splitInfo == nil
- })
+ }); err != nil {
+ return err
+ }
if locked.is {
- return DeleteRes{}, locked.err
+ return new(apistatus.ObjectLocked)
}
if splitInfo != nil {
- e.deleteChildren(prm.addr, prm.forceRemoval, splitInfo.SplitID())
+ return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
}
- return DeleteRes{}, nil
+ return nil
}
-func (e *StorageEngine) deleteChildren(addr oid.Address, force bool, splitID *objectSDK.SplitID) {
+func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error {
var fs objectSDK.SearchFilters
fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
var selectPrm shard.SelectPrm
selectPrm.SetFilters(fs)
- selectPrm.SetContainerID(addr.Container())
+ selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID
var inhumePrm shard.InhumePrm
if force {
inhumePrm.ForceRemoval()
}
- e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
- res, err := sh.Select(selectPrm)
+ return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
+ res, err := sh.Select(ctx, selectPrm)
if err != nil {
- e.log.Warn("error during searching for object children",
+ e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
- zap.String("error", err.Error()))
+ zap.Error(err))
return false
}
for _, addr := range res.AddressList() {
inhumePrm.MarkAsGarbage(addr)
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
- e.log.Debug("could not inhume object in shard",
+ e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
- zap.String("err", err.Error()))
+ zap.Error(err))
continue
}
}
return false
})
}
+
+func (e *StorageEngine) deleteChunks(
+ ctx context.Context, sh hashedShard, ecInfo *objectSDK.ECInfo, prm DeletePrm,
+) {
+ var inhumePrm shard.InhumePrm
+ if prm.forceRemoval {
+ inhumePrm.ForceRemoval()
+ }
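+ // Each EC chunk lives in the same container as the parent object:
+ // convert the chunk ID from its V2 form and mark the chunk as garbage
+ // so that the shard GC removes it.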
+ for _, chunk := range ecInfo.Chunks {
+ var addr oid.Address
+ addr.SetContainer(prm.addr.Container())
+ var objID oid.ID
+ err := objID.ReadFromV2(chunk.ID)
+ if err != nil {
+ e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
+ continue // the chunk ID could not be parsed, skip it
+ }
+ addr.SetObject(objID)
+ inhumePrm.MarkAsGarbage(addr)
+ _, err = sh.Inhume(ctx, inhumePrm)
+ if err != nil {
+ e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
+ zap.Stringer("addr", addr),
+ zap.Error(err))
+ continue
+ }
+ }
+}
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index ff604f660e..a56598c09d 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -1,28 +1,28 @@
package engine
import (
- "os"
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
)
func TestDeleteBigObject(t *testing.T) {
- defer os.RemoveAll(t.Name())
+ t.Parallel()
cnr := cidtest.ID()
parentID := oidtest.ID()
splitID := objectSDK.NewSplitID()
- parent := generateObjectWithCID(t, cnr)
+ parent := testutil.GenerateObjectWithCID(cnr)
parent.SetID(parentID)
parent.SetPayload(nil)
@@ -30,7 +30,7 @@ func TestDeleteBigObject(t *testing.T) {
children := make([]*objectSDK.Object, childCount)
childIDs := make([]oid.ID, childCount)
for i := range children {
- children[i] = generateObjectWithCID(t, cnr)
+ children[i] = testutil.GenerateObjectWithCID(cnr)
if i != 0 {
children[i].SetPreviousID(childIDs[i-1])
}
@@ -42,58 +42,143 @@ func TestDeleteBigObject(t *testing.T) {
childIDs[i], _ = children[i].ID()
}
- link := generateObjectWithCID(t, cnr)
+ link := testutil.GenerateObjectWithCID(cnr)
link.SetParent(parent)
link.SetParentID(parentID)
link.SetSplitID(splitID)
link.SetChildren(childIDs...)
- s1 := testNewShard(t, 1)
- s2 := testNewShard(t, 2)
- s3 := testNewShard(t, 3)
-
- e := testNewEngineWithShards(s1, s2, s3)
- e.log = &logger.Logger{Logger: zaptest.NewLogger(t)}
- defer e.Close()
+ e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
for i := range children {
- require.NoError(t, Put(e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i], false))
}
- require.NoError(t, Put(e, link))
-
- var splitErr *objectSDK.SplitInfoError
+ require.NoError(t, Put(context.Background(), e, link, false))
addrParent := object.AddressOf(parent)
- checkGetError(t, e, addrParent, &splitErr)
+ checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
addrLink := object.AddressOf(link)
- checkGetError(t, e, addrLink, nil)
+ checkGetError[error](t, e, addrLink, false)
for i := range children {
- checkGetError(t, e, object.AddressOf(children[i]), nil)
+ checkGetError[error](t, e, object.AddressOf(children[i]), false)
}
var deletePrm DeletePrm
deletePrm.WithForceRemoval()
deletePrm.WithAddress(addrParent)
- _, err := e.Delete(deletePrm)
- require.NoError(t, err)
+ require.NoError(t, e.Delete(context.Background(), deletePrm))
- checkGetError(t, e, addrParent, &apistatus.ObjectNotFound{})
- checkGetError(t, e, addrLink, &apistatus.ObjectNotFound{})
+ checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
+ checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
for i := range children {
- checkGetError(t, e, object.AddressOf(children[i]), &apistatus.ObjectNotFound{})
+ checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true)
}
}
-func checkGetError(t *testing.T, e *StorageEngine, addr oid.Address, expected any) {
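+// TestDeleteBigObjectWithoutGC verifies that a split object can be removed
+// logically and then deleted physically from a shard whose GC is disabled.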
+func TestDeleteBigObjectWithoutGC(t *testing.T) {
+ t.Parallel()
+
+ cnr := cidtest.ID()
+ parentID := oidtest.ID()
+ splitID := objectSDK.NewSplitID()
+
+ parent := testutil.GenerateObjectWithCID(cnr)
+ parent.SetID(parentID)
+ parent.SetPayload(nil)
+
+ const childCount = 3
+ children := make([]*objectSDK.Object, childCount)
+ childIDs := make([]oid.ID, childCount)
+ for i := range children {
+ children[i] = testutil.GenerateObjectWithCID(cnr)
+ if i != 0 {
+ children[i].SetPreviousID(childIDs[i-1])
+ }
+ if i == len(children)-1 {
+ children[i].SetParent(parent)
+ }
+ children[i].SetSplitID(splitID)
+ children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)})
+ childIDs[i], _ = children[i].ID()
+ }
+
+ link := testutil.GenerateObjectWithCID(cnr)
+ link.SetParent(parent)
+ link.SetParentID(parentID)
+ link.SetSplitID(splitID)
+ link.SetChildren(childIDs...)
+
+ te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+ return []shard.Option{shard.WithDisabledGC()}
+ }).prepare(t)
+ e := te.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
+
+ s1 := te.shards[0]
+
+ for i := range children {
+ require.NoError(t, Put(context.Background(), e, children[i], false))
+ }
+ require.NoError(t, Put(context.Background(), e, link, false))
+
+ addrParent := object.AddressOf(parent)
+ checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
+
+ addrLink := object.AddressOf(link)
+ checkGetError[error](t, e, addrLink, false)
+
+ for i := range children {
+ checkGetError[error](t, e, object.AddressOf(children[i]), false)
+ }
+
+ // delete logical
+ var deletePrm DeletePrm
+ deletePrm.WithForceRemoval()
+ deletePrm.WithAddress(addrParent)
+
+ require.NoError(t, e.Delete(context.Background(), deletePrm))
+
+ checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
+ checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
+ for i := range children {
+ checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true)
+ }
+
+ // delete physical
+ var delPrm shard.DeletePrm
+ delPrm.SetAddresses(addrParent)
+ _, err := s1.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
+
+ delPrm.SetAddresses(addrLink)
+ _, err = s1.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
+
+ for i := range children {
+ delPrm.SetAddresses(object.AddressOf(children[i]))
+ _, err = s1.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
+ }
+
+ checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
+ checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
+ for i := range children {
+ checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true)
+ }
+}
+
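+// checkGetError fetches addr from the engine and, when shouldFail is set,
+// asserts that the returned error unwraps to the type parameter E.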
+func checkGetError[E error](t *testing.T, e *StorageEngine, addr oid.Address, shouldFail bool) {
var getPrm GetPrm
getPrm.WithAddress(addr)
- _, err := e.Get(getPrm)
- if expected != nil {
- require.ErrorAs(t, err, expected)
+ _, err := e.Get(context.Background(), getPrm)
+ if shouldFail {
+ var target E
+ require.ErrorAs(t, err, &target)
} else {
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/engine/dump.go b/pkg/local_object_storage/engine/dump.go
deleted file mode 100644
index f5cf8c32e1..0000000000
--- a/pkg/local_object_storage/engine/dump.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package engine
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-
-// DumpShard dumps objects from the shard with provided identifier.
-//
-// Returns an error if shard is not read-only.
-func (e *StorageEngine) DumpShard(id *shard.ID, prm shard.DumpPrm) error {
- e.mtx.RLock()
- defer e.mtx.RUnlock()
-
- sh, ok := e.shards[id.String()]
- if !ok {
- return errShardNotFound
- }
-
- _, err := sh.Dump(prm)
- return err
-}
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 4d154d2899..376d545d38 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -1,15 +1,20 @@
package engine
import (
+ "context"
"errors"
"sync"
+ "sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/atomic"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
@@ -17,21 +22,21 @@ import (
type StorageEngine struct {
*cfg
- mtx *sync.RWMutex
+ removeDuplicatesInProgress atomic.Bool
+
+ mtx sync.RWMutex
shards map[string]hashedShard
- shardPools map[string]util.WorkerPool
-
closeCh chan struct{}
setModeCh chan setModeRequest
wg sync.WaitGroup
blockExec struct {
- mtx sync.RWMutex
-
- err error
+ mtx sync.RWMutex
+ closed bool
}
+ evacuateLimiter *evacuationLimiter
}
type shardWrapper struct {
@@ -41,12 +46,13 @@ type shardWrapper struct {
type setModeRequest struct {
sh *shard.Shard
+ isMeta bool
errorCount uint32
}
// setModeLoop listens setModeCh to perform degraded mode transition of a single shard.
// Instead of creating a worker per single shard we use a single goroutine.
-func (e *StorageEngine) setModeLoop() {
+func (e *StorageEngine) setModeLoop(ctx context.Context) {
defer e.wg.Done()
var (
@@ -66,7 +72,7 @@ func (e *StorageEngine) setModeLoop() {
if !ok {
inProgress[sid] = struct{}{}
go func() {
- e.moveToDegraded(r.sh, r.errorCount)
+ e.moveToDegraded(ctx, r.sh, r.errorCount, r.isMeta)
mtx.Lock()
delete(inProgress, sid)
@@ -78,40 +84,36 @@ func (e *StorageEngine) setModeLoop() {
}
}
-func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32) {
+func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, errCount uint32, isMeta bool) {
+ sid := sh.ID()
+ log := e.log.With(
+ zap.Stringer("shard_id", sid),
+ zap.Uint32("error count", errCount))
+
e.mtx.RLock()
defer e.mtx.RUnlock()
- sid := sh.ID()
- err := sh.SetMode(mode.DegradedReadOnly)
- if err != nil {
- e.log.Error("failed to move shard in degraded-read-only mode, moving to read-only",
- zap.Stringer("shard_id", sid),
- zap.Uint32("error count", errCount),
- zap.Error(err))
-
- err = sh.SetMode(mode.ReadOnly)
- if err != nil {
- e.log.Error("failed to move shard in read-only mode",
- zap.Stringer("shard_id", sid),
- zap.Uint32("error count", errCount),
- zap.Error(err))
- } else {
- e.log.Info("shard is moved in read-only mode due to error threshold",
- zap.Stringer("shard_id", sid),
- zap.Uint32("error count", errCount))
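+ // A metabase failure still allows serving data without the metabase,
+ // so try degraded-read-only first; any other failure moves the shard
+ // straight to read-only.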
+ if isMeta {
+ err := sh.SetMode(ctx, mode.DegradedReadOnly)
+ if err == nil {
+ log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
+ return
}
- } else {
- e.log.Info("shard is moved in degraded mode due to error threshold",
- zap.Stringer("shard_id", sid),
- zap.Uint32("error count", errCount))
+ log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
+ zap.Error(err))
}
+
+ err := sh.SetMode(ctx, mode.ReadOnly)
+ if err != nil {
+ log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
+ return
+ }
+
+ log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
}
-// reportShardErrorBackground increases shard error counter and logs an error.
-// It is intended to be used from background workers and
-// doesn't change shard mode because of possible deadlocks.
-func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) {
+// reportShardErrorByID increases the shard error counter and logs the error.
+func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) {
e.mtx.RLock()
sh, ok := e.shards[id]
e.mtx.RUnlock()
@@ -120,75 +122,61 @@ func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err er
return
}
- if isLogical(err) {
- e.log.Warn(msg,
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()))
- return
- }
-
- errCount := sh.errorCount.Inc()
- e.reportShardErrorWithFlags(sh.Shard, errCount, false, msg, err)
+ e.reportShardError(ctx, sh, msg, err)
}
// reportShardError checks that the amount of errors doesn't exceed the configured threshold.
// If it does, shard is set to read-only mode.
func (e *StorageEngine) reportShardError(
+ ctx context.Context,
sh hashedShard,
msg string,
err error,
- fields ...zap.Field) {
+ fields ...zap.Field,
+) {
if isLogical(err) {
- e.log.Warn(msg,
+ e.log.Warn(ctx, msg,
zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()))
+ zap.Error(err))
return
}
- errCount := sh.errorCount.Inc()
- e.reportShardErrorWithFlags(sh.Shard, errCount, true, msg, err, fields...)
-}
+ errCount := sh.errorCount.Add(1)
+ e.metrics.IncErrorCounter(sh.ID().String())
-func (e *StorageEngine) reportShardErrorWithFlags(
- sh *shard.Shard,
- errCount uint32,
- block bool,
- msg string,
- err error,
- fields ...zap.Field) {
sid := sh.ID()
- e.log.Warn(msg, append([]zap.Field{
+ e.log.Warn(ctx, msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
- zap.String("error", err.Error()),
+ zap.Error(err),
}, fields...)...)
if e.errorsThreshold == 0 || errCount < e.errorsThreshold {
return
}
- if block {
- e.moveToDegraded(sh, errCount)
- } else {
- req := setModeRequest{
- errorCount: errCount,
- sh: sh,
- }
+ req := setModeRequest{
+ errorCount: errCount,
+ sh: sh.Shard,
+ isMeta: errors.As(err, new(metaerr.Error)),
+ }
- select {
- case e.setModeCh <- req:
- default:
- // For background workers we can have a lot of such errors,
- // thus logging is done with DEBUG level.
- e.log.Debug("mode change is in progress, ignoring set-mode request",
- zap.Stringer("shard_id", sid),
- zap.Uint32("error_count", errCount))
- }
+ select {
+ case e.setModeCh <- req:
+ default:
+ // For background workers we can have a lot of such errors,
+ // thus logging is done with DEBUG level.
+ e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
+ zap.Stringer("shard_id", sid),
+ zap.Uint32("error_count", errCount))
}
}
func isLogical(err error) bool {
- return errors.As(err, &logicerr.Logical{})
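+ // Context cancellations and resource-exhausted statuses are caused by
+ // the request, not by the shard, so they must not increase the shard
+ // error counter.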
+ return errors.As(err, &logicerr.Logical{}) ||
+ errors.Is(err, context.Canceled) ||
+ errors.Is(err, context.DeadlineExceeded) ||
+ errors.As(err, new(*apistatus.ResourceExhausted))
}
// Option represents StorageEngine's constructor option.
@@ -201,15 +189,18 @@ type cfg struct {
metrics MetricRegister
- shardPoolSize uint32
+ lowMem bool
+
+ containerSource atomic.Pointer[containerSource]
}
func defaultCfg() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
-
- shardPoolSize: 20,
+ res := &cfg{
+ log: logger.NewLoggerWrapper(zap.L()),
+ metrics: noopMetrics{},
}
+ res.containerSource.Store(&containerSource{})
+ return res
}
// New creates, initializes and returns new StorageEngine instance.
@@ -220,13 +211,18 @@ func New(opts ...Option) *StorageEngine {
opts[i](c)
}
+ evLimMtx := &sync.RWMutex{}
+ evLimCond := sync.NewCond(evLimMtx)
+
return &StorageEngine{
- cfg: c,
- mtx: new(sync.RWMutex),
- shards: make(map[string]hashedShard),
- shardPools: make(map[string]util.WorkerPool),
- closeCh: make(chan struct{}),
- setModeCh: make(chan setModeRequest),
+ cfg: c,
+ shards: make(map[string]hashedShard),
+ closeCh: make(chan struct{}),
+ setModeCh: make(chan setModeRequest),
+ evacuateLimiter: &evacuationLimiter{
+ guard: evLimMtx,
+ statusCond: evLimCond,
+ },
}
}
@@ -243,13 +239,6 @@ func WithMetrics(v MetricRegister) Option {
}
}
-// WithShardPoolSize returns option to specify size of worker pool for each shard.
-func WithShardPoolSize(sz uint32) Option {
- return func(c *cfg) {
- c.shardPoolSize = sz
- }
-}
-
// WithErrorThreshold returns an option to specify the number of errors after which
// shard is moved to read-only mode.
func WithErrorThreshold(sz uint32) Option {
@@ -257,3 +246,37 @@ func WithErrorThreshold(sz uint32) Option {
c.errorsThreshold = sz
}
}
+
+// WithLowMemoryConsumption returns an option that reduces memory consumption at the cost of performance.
+func WithLowMemoryConsumption(lowMemCons bool) Option {
+ return func(c *cfg) {
+ c.lowMem = lowMemCons
+ }
+}
+
+// SetContainerSource sets the container source used for container availability checks.
+func (e *StorageEngine) SetContainerSource(cs container.Source) {
+ e.containerSource.Store(&containerSource{cs: cs})
+}
+
+type containerSource struct {
+ cs container.Source
+}
+
+func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (bool, error) {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ default:
+ }
+
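+ // Without a configured container source availability cannot be
+ // checked, so the container is treated as available.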
+ if s == nil || s.cs == nil {
+ return true, nil
+ }
+
+ wasRemoved, err := container.WasRemoved(ctx, s.cs, id)
+ if err != nil {
+ return false, err
+ }
+ return !wasRemoved, nil
+}
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index a2dced6070..fc6d9ee9c7 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -1,213 +1,234 @@
package engine
import (
+ "context"
"fmt"
- "os"
"path/filepath"
+ "runtime/debug"
+ "strings"
+ "sync"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "git.frostfs.info/TrueCloudLab/hrw"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
- "github.com/panjf2000/ants/v2"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
- "go.uber.org/atomic"
- "go.uber.org/zap"
)
-type epochState struct{}
+type epochState struct {
+ currEpoch uint64
+}
func (s epochState) CurrentEpoch() uint64 {
- return 0
+ return s.currEpoch
}
-func BenchmarkExists(b *testing.B) {
- b.Run("2 shards", func(b *testing.B) {
- benchmarkExists(b, 2)
- })
- b.Run("4 shards", func(b *testing.B) {
- benchmarkExists(b, 4)
- })
- b.Run("8 shards", func(b *testing.B) {
- benchmarkExists(b, 8)
+type testEngineWrapper struct {
+ engine *StorageEngine
+ shards []*shard.Shard
+ shardIDs []*shard.ID
+}
+
+func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
+ opts = append(testGetDefaultEngineOptions(t), opts...)
+ return &testEngineWrapper{engine: New(opts...)}
+}
+
+func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
+ return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
+ return testGetDefaultShardOptions(t)
})
}
-func benchmarkExists(b *testing.B, shardNum int) {
- shards := make([]*shard.Shard, shardNum)
- for i := 0; i < shardNum; i++ {
- shards[i] = testNewShard(b, i)
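+// setShardsNumOpts creates num shards, each configured by shardOpts(id),
+// registers them in the engine and records their IDs for later lookups.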
+func (te *testEngineWrapper) setShardsNumOpts(
+ t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+ te.shards = make([]*shard.Shard, num)
+ te.shardIDs = make([]*shard.ID, num)
+ for i := range num {
+ shard, err := te.engine.createShard(context.Background(), shardOpts(i))
+ require.NoError(t, err)
+ require.NoError(t, te.engine.addShard(shard))
+ te.shards[i] = shard
+ te.shardIDs[i] = shard.ID()
}
+ require.Len(t, te.engine.shards, num)
+ return te
+}
- e := testNewEngineWithShards(shards...)
- b.Cleanup(func() {
- _ = e.Close()
- _ = os.RemoveAll(b.Name())
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(
+ t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+ return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
+ return append(testGetDefaultShardOptions(t), shardOpts(id)...)
})
+}
- addr := oidtest.Address()
- for i := 0; i < 100; i++ {
- obj := generateObjectWithCID(b, cidtest.ID())
- err := Put(e, obj)
- if err != nil {
- b.Fatal(err)
- }
- }
+// prepare calls Open and Init on the created engine.
+func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
+ require.NoError(t, te.engine.Open(context.Background()))
+ require.NoError(t, te.engine.Init(context.Background()))
+ return te
+}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- ok, err := e.exists(addr)
- if err != nil || ok {
- b.Fatalf("%t %v", ok, err)
- }
+func testGetDefaultEngineOptions(t testing.TB) []Option {
+ return []Option{
+ WithLogger(test.NewLogger(t)),
}
}
-func testNewEngineWithShards(shards ...*shard.Shard) *StorageEngine {
- engine := New()
-
- for _, s := range shards {
- pool, err := ants.NewPool(10, ants.WithNonblocking(true))
- if err != nil {
- panic(err)
- }
-
- engine.shards[s.ID().String()] = hashedShard{
- shardWrapper: shardWrapper{
- errorCount: atomic.NewUint32(0),
- Shard: s,
- },
- hash: hrw.Hash([]byte(s.ID().String())),
- }
- engine.shardPools[s.ID().String()] = pool
+func testGetDefaultShardOptions(t testing.TB) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t)),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages(
+ newStorages(t, t.TempDir(), 1<<20)),
+ blobstor.WithLogger(test.NewLogger(t)),
+ ),
+ shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
+ shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...),
+ shard.WithLimiter(&testQoSLimiter{t: t}),
}
-
- return engine
}
-func newStorages(root string, smallSize uint64) []blobstor.SubStorage {
+func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option {
+ return []meta.Option{
+ meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+ meta.WithPermissions(0o700),
+ meta.WithEpochState(epochState{}),
+ meta.WithLogger(test.NewLogger(t)),
+ }
+}
+
+func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
return []blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1),
- blobovniczatree.WithPermissions(0700)),
- Policy: func(_ *object.Object, data []byte) bool {
+ blobovniczatree.WithPermissions(0o700),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))),
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < smallSize
},
},
{
Storage: fstree.New(
fstree.WithPath(root),
- fstree.WithDepth(1)),
+ fstree.WithDepth(1),
+ fstree.WithLogger(test.NewLogger(t))),
},
}
}
-func testNewShard(t testing.TB, id int) *shard.Shard {
- sid, err := generateShardID()
- require.NoError(t, err)
-
- s := shard.New(
- shard.WithID(sid),
- shard.WithLogger(&logger.Logger{Logger: zap.L()}),
- shard.WithBlobStorOptions(
- blobstor.WithStorages(
- newStorages(filepath.Join(t.Name(), fmt.Sprintf("%d.blobstor", id)),
- 1<<20))),
- shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.pilorama", id)))),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.metabase", id))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
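+// newTestStorages builds the usual two-level blobstor layout, but wraps both
+// substorages in teststore so that tests can inject failures per call.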
+func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *teststore.TestStore, *teststore.TestStore) {
+ smallFileStorage := teststore.New(
+ teststore.WithSubstorage(blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
+ blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
+ blobovniczatree.WithBlobovniczaShallowDepth(1),
+ blobovniczatree.WithBlobovniczaShallowWidth(1),
+ blobovniczatree.WithPermissions(0o700)),
))
-
- require.NoError(t, s.Open())
- require.NoError(t, s.Init())
-
- return s
+ largeFileStorage := teststore.New(
+ teststore.WithSubstorage(fstree.New(
+ fstree.WithPath(root),
+ fstree.WithDepth(1)),
+ ))
+ return []blobstor.SubStorage{
+ {
+ Storage: smallFileStorage,
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
+ return uint64(len(data)) < smallSize
+ },
+ },
+ {
+ Storage: largeFileStorage,
+ },
+ }, smallFileStorage, largeFileStorage
}
-func testEngineFromShardOpts(t *testing.T, num int, extraOpts []shard.Option) *StorageEngine {
- engine := New()
- for i := 0; i < num; i++ {
- _, err := engine.AddShard(append([]shard.Option{
- shard.WithBlobStorOptions(
- blobstor.WithStorages(
- newStorages(filepath.Join(t.Name(), fmt.Sprintf("blobstor%d", i)),
- 1<<20)),
- ),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("metabase%d", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ),
- shard.WithPiloramaOptions(
- pilorama.WithPath(filepath.Join(t.Name(), fmt.Sprintf("pilorama%d", i)))),
- }, extraOpts...)...)
- require.NoError(t, err)
+var _ qos.Limiter = (*testQoSLimiter)(nil)
+
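+// testQoSLimiter is a test double that tracks every outstanding read and
+// write request by the stack trace of its acquisition.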
+type testQoSLimiter struct {
+ t testing.TB
+ guard sync.Mutex
+ id int64
+ readStacks map[int64][]byte
+ writeStacks map[int64][]byte
+}
+
+func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
+
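+// Close fails the test if any request acquired through the limiter was not
+// released before the engine shut down, printing the captured stacks of the
+// leaked requests.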
+func (t *testQoSLimiter) Close() {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ var sb strings.Builder
+ var seqN int
+ for _, stack := range t.readStacks {
+ seqN++
+ sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
}
-
- require.NoError(t, engine.Open())
- require.NoError(t, engine.Init())
-
- return engine
-}
-
-func generateObjectWithCID(t testing.TB, cnr cid.ID) *object.Object {
- var ver version.Version
- ver.SetMajor(2)
- ver.SetMinor(1)
-
- csum := checksumtest.Checksum()
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- obj := object.New()
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cnr)
- obj.SetVersion(&ver)
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
- obj.SetPayload([]byte{1, 2, 3, 4, 5})
-
- return obj
-}
-
-func addAttribute(obj *object.Object, key, val string) {
- var attr object.Attribute
- attr.SetKey(key)
- attr.SetValue(val)
-
- attrs := obj.Attributes()
- attrs = append(attrs, attr)
- obj.SetAttributes(attrs...)
-}
-
-func testNewEngineWithShardNum(t *testing.T, num int) *StorageEngine {
- shards := make([]*shard.Shard, 0, num)
-
- for i := 0; i < num; i++ {
- shards = append(shards, testNewShard(t, i))
+ for _, stack := range t.writeStacks {
+ seqN++
+ sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
}
-
- return testNewEngineWithShards(shards...)
+ require.True(t.t, seqN == 0, sb.String())
}
+
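+// ReadRequest records the caller's stack so that a request which is never
+// released can be attributed to the code path that acquired it.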
+func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ stack := debug.Stack()
+
+ t.id++
+ id := t.id
+
+ if t.readStacks == nil {
+ t.readStacks = make(map[int64][]byte)
+ }
+ t.readStacks[id] = stack
+
+ return func() {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ delete(t.readStacks, id)
+ }, nil
+}
+
+func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
+ t.guard.Lock()
+ defer t.guard.Unlock()
+
+ stack := debug.Stack()
+
+ t.id++
+ id := t.id
+
+ if t.writeStacks == nil {
+ t.writeStacks = make(map[int64][]byte)
+ }
+ t.writeStacks[id] = stack
+
+ return func() {
+ t.quard.Lock()
+ defer t.quard.Unlock()
+
+ delete(t.writeStacks, id)
+ }, nil
+}
+
+func (t *testQoSLimiter) SetParentID(string) {}
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index bc205d8363..57029dd5fc 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -1,198 +1,225 @@
package engine
import (
+ "context"
"fmt"
"os"
"path/filepath"
"strconv"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
)
const errSmallSize = 256
-func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) (*StorageEngine, string, [2]*shard.ID) {
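+// testEngine bundles the engine under test with its two teststore-backed
+// shards, so that tests can reach the injectable storages directly.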
+type testEngine struct {
+ ng *StorageEngine
+ dir string
+ shards [2]*testShard
+}
+
+type testShard struct {
+ id *shard.ID
+ smallFileStorage *teststore.TestStore
+ largeFileStorage *teststore.TestStore
+}
+
+func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) *testEngine {
if dir == "" {
- var err error
-
- dir, err = os.MkdirTemp("", "*")
- require.NoError(t, err)
- t.Cleanup(func() { _ = os.RemoveAll(dir) })
+ dir = t.TempDir()
}
- e := New(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- WithShardPoolSize(1),
- WithErrorThreshold(errThreshold))
+ var testShards [2]*testShard
- var ids [2]*shard.ID
- var err error
+ te := testNewEngine(t,
+ WithErrorThreshold(errThreshold),
+ ).
+ setShardsNumOpts(t, 2, func(id int) []shard.Option {
+ storages, smallFileStorage, largeFileStorage := newTestStorages(filepath.Join(dir, strconv.Itoa(id)), errSmallSize)
+ testShards[id] = &testShard{
+ smallFileStorage: smallFileStorage,
+ largeFileStorage: largeFileStorage,
+ }
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t)),
+ shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
+ meta.WithPermissions(0o700),
+ meta.WithEpochState(epochState{}),
+ ),
+ shard.WithPiloramaOptions(
+ pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
+ pilorama.WithPerm(0o700)),
+ }
+ }).prepare(t)
+ e := te.engine
- for i := range ids {
- ids[i], err = e.AddShard(
- shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- shard.WithBlobStorOptions(
- blobstor.WithStorages(newStorages(filepath.Join(dir, strconv.Itoa(i)), errSmallSize))),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ),
- shard.WithPiloramaOptions(
- pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", i))),
- pilorama.WithPerm(0700)))
- require.NoError(t, err)
+ for i, id := range te.shardIDs {
+ testShards[i].id = id
}
- require.NoError(t, e.Open())
- require.NoError(t, e.Init())
- return e, dir, ids
+ return &testEngine{
+ ng: e,
+ dir: dir,
+ shards: testShards,
+ }
}
func TestErrorReporting(t *testing.T) {
t.Run("ignore errors by default", func(t *testing.T) {
- e, dir, id := newEngineWithErrorThreshold(t, "", 0)
+ te := newEngineWithErrorThreshold(t, "", 0)
- obj := generateObjectWithCID(t, cidtest.ID())
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
obj.SetPayload(make([]byte, errSmallSize))
var prm shard.PutPrm
prm.SetObject(obj)
- e.mtx.RLock()
- _, err := e.shards[id[0].String()].Shard.Put(prm)
- e.mtx.RUnlock()
+ te.ng.mtx.RLock()
+ _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
+ te.ng.mtx.RUnlock()
require.NoError(t, err)
- _, err = e.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.NoError(t, err)
- checkShardState(t, e, id[0], 0, mode.ReadWrite)
- checkShardState(t, e, id[1], 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
- corruptSubDir(t, filepath.Join(dir, "0"))
+ for _, sh := range te.shards {
+ sh.largeFileStorage.SetOption(teststore.WithGet(func(common.GetPrm) (common.GetRes, error) {
+ return common.GetRes{}, teststore.ErrDiskExploded
+ }))
+ }
for i := uint32(1); i < 3; i++ {
- _, err = e.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
- checkShardState(t, e, id[0], i, mode.ReadWrite)
- checkShardState(t, e, id[1], 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
+ require.NoError(t, te.ng.Close(context.Background()))
})
t.Run("with error threshold", func(t *testing.T) {
const errThreshold = 3
- e, dir, id := newEngineWithErrorThreshold(t, "", errThreshold)
+ te := newEngineWithErrorThreshold(t, "", errThreshold)
- obj := generateObjectWithCID(t, cidtest.ID())
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
obj.SetPayload(make([]byte, errSmallSize))
var prm shard.PutPrm
prm.SetObject(obj)
- e.mtx.RLock()
- _, err := e.shards[id[0].String()].Put(prm)
- e.mtx.RUnlock()
+ te.ng.mtx.RLock()
+ _, err := te.ng.shards[te.shards[0].id.String()].Put(context.Background(), prm)
+ te.ng.mtx.RUnlock()
require.NoError(t, err)
- _, err = e.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.NoError(t, err)
- checkShardState(t, e, id[0], 0, mode.ReadWrite)
- checkShardState(t, e, id[1], 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
- corruptSubDir(t, filepath.Join(dir, "0"))
+ for _, sh := range te.shards {
+ sh.largeFileStorage.SetOption(teststore.WithGet(func(common.GetPrm) (common.GetRes, error) {
+ return common.GetRes{}, teststore.ErrDiskExploded
+ }))
+ }
for i := uint32(1); i < errThreshold; i++ {
- _, err = e.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
- checkShardState(t, e, id[0], i, mode.ReadWrite)
- checkShardState(t, e, id[1], 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- for i := uint32(0); i < 2; i++ {
- _, err = e.Get(GetPrm{addr: object.AddressOf(obj)})
+ for i := range uint32(2) {
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
- checkShardState(t, e, id[0], errThreshold+i, mode.DegradedReadOnly)
- checkShardState(t, e, id[1], 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly)
+ checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- require.NoError(t, e.SetShardMode(id[0], mode.ReadWrite, false))
- checkShardState(t, e, id[0], errThreshold+1, mode.ReadWrite)
+ require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, false))
+ checkShardState(t, te.ng, te.shards[0].id, errThreshold+1, mode.ReadWrite)
- require.NoError(t, e.SetShardMode(id[0], mode.ReadWrite, true))
- checkShardState(t, e, id[0], 0, mode.ReadWrite)
+ require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, true))
+ checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+ require.NoError(t, te.ng.Close(context.Background()))
})
}
-// Issue #1186.
func TestBlobstorFailback(t *testing.T) {
- dir, err := os.MkdirTemp("", "*")
- require.NoError(t, err)
- t.Cleanup(func() { require.NoError(t, os.RemoveAll(dir)) })
+ dir := t.TempDir()
- e, _, id := newEngineWithErrorThreshold(t, dir, 1)
+ te := newEngineWithErrorThreshold(t, dir, 1)
objs := make([]*objectSDK.Object, 0, 2)
for _, size := range []int{15, errSmallSize + 1} {
- obj := generateObjectWithCID(t, cidtest.ID())
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
obj.SetPayload(make([]byte, size))
var prm shard.PutPrm
prm.SetObject(obj)
- e.mtx.RLock()
- _, err = e.shards[id[0].String()].Shard.Put(prm)
- e.mtx.RUnlock()
+ te.ng.mtx.RLock()
+ _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
+ te.ng.mtx.RUnlock()
require.NoError(t, err)
objs = append(objs, obj)
}
for i := range objs {
addr := object.AddressOf(objs[i])
- _, err = e.Get(GetPrm{addr: addr})
+ _, err := te.ng.Get(context.Background(), GetPrm{addr: addr})
require.NoError(t, err)
- _, err = e.GetRange(RngPrm{addr: addr})
+ _, err = te.ng.GetRange(context.Background(), RngPrm{addr: addr})
require.NoError(t, err)
}
- checkShardState(t, e, id[0], 0, mode.ReadWrite)
- require.NoError(t, e.Close())
+ checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+ require.NoError(t, te.ng.Close(context.Background()))
- p1 := e.shards[id[0].String()].Shard.DumpInfo().BlobStorInfo.SubStorages[1].Path
- p2 := e.shards[id[1].String()].Shard.DumpInfo().BlobStorInfo.SubStorages[1].Path
+ p1 := te.ng.shards[te.shards[0].id.String()].Shard.DumpInfo().BlobStorInfo.SubStorages[1].Path
+ p2 := te.ng.shards[te.shards[1].id.String()].Shard.DumpInfo().BlobStorInfo.SubStorages[1].Path
tmp := filepath.Join(dir, "tmp")
require.NoError(t, os.Rename(p1, tmp))
require.NoError(t, os.Rename(p2, p1))
require.NoError(t, os.Rename(tmp, p2))
- e, _, id = newEngineWithErrorThreshold(t, dir, 1)
+ te = newEngineWithErrorThreshold(t, dir, 1)
for i := range objs {
addr := object.AddressOf(objs[i])
- getRes, err := e.Get(GetPrm{addr: addr})
+ getRes, err := te.ng.Get(context.Background(), GetPrm{addr: addr})
require.NoError(t, err)
require.Equal(t, objs[i], getRes.Object())
- rngRes, err := e.GetRange(RngPrm{addr: addr, off: 1, ln: 10})
+ rngRes, err := te.ng.GetRange(context.Background(), RngPrm{addr: addr, off: 1, ln: 10})
require.NoError(t, err)
require.Equal(t, objs[i].Payload()[1:11], rngRes.Object().Payload())
- _, err = e.GetRange(RngPrm{addr: addr, off: errSmallSize + 10, ln: 1})
- require.ErrorAs(t, err, &apistatus.ObjectOutOfRange{})
+ _, err = te.ng.GetRange(context.Background(), RngPrm{addr: addr, off: errSmallSize + 10, ln: 1})
+ require.True(t, shard.IsErrOutOfRange(err))
}
- checkShardState(t, e, id[0], 1, mode.DegradedReadOnly)
- checkShardState(t, e, id[1], 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+ checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
+ require.NoError(t, te.ng.Close(context.Background()))
}
func checkShardState(t *testing.T, e *StorageEngine, id *shard.ID, errCount uint32, mode mode.Mode) {
@@ -200,22 +227,8 @@ func checkShardState(t *testing.T, e *StorageEngine, id *shard.ID, errCount uint
sh := e.shards[id.String()]
e.mtx.RUnlock()
- require.Equal(t, errCount, sh.errorCount.Load())
- require.Equal(t, mode, sh.GetMode())
-}
-
-// corruptSubDir makes random directory except "blobovnicza" in blobstor FSTree unreadable.
-func corruptSubDir(t *testing.T, dir string) {
- de, err := os.ReadDir(dir)
- require.NoError(t, err)
-
- // FIXME(@cthulhu-rider): copy-paste of unexported const from blobstor package, see #1407
- const dirBlobovnicza = "blobovnicza"
-
- for i := range de {
- if de[i].IsDir() && de[i].Name() != dirBlobovnicza {
- require.NoError(t, os.Chmod(filepath.Join(dir, de[i].Name()), 0))
- return
- }
- }
+ require.Eventually(t, func() bool {
+ return errCount == sh.errorCount.Load() &&
+ mode == sh.GetMode()
+ }, 10*time.Second, 10*time.Millisecond, "shard mode did not change to the expected state within 10 seconds")
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 59965ba4f6..c08dfbf03d 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -1,193 +1,863 @@
package engine
import (
+ "context"
"errors"
"fmt"
+ "slices"
+ "strings"
+ "sync"
+ "sync/atomic"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
)
+const (
+ // containerWorkerCountDefault is the default number of concurrent
+ // container evacuation workers.
+ containerWorkerCountDefault = 10
+ // objectWorkerCountDefault is the default number of concurrent
+ // object evacuation workers.
+ objectWorkerCountDefault = 10
+)
+
+var (
+ ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode")
+
+ evacuationOperationLogField = zap.String("operation", "evacuation")
+)
+
+// EvacuateScope is an evacuation scope. Keep in sync with pkg/services/control/service.proto.
+type EvacuateScope uint32
+
+var (
+ EvacuateScopeObjects EvacuateScope = 1
+ EvacuateScopeTrees EvacuateScope = 2
+)
+
+func (s EvacuateScope) String() string {
+ var sb strings.Builder
+ first := true
+ if s&EvacuateScopeObjects == EvacuateScopeObjects {
+ sb.WriteString("objects")
+ first = false
+ }
+ if s&EvacuateScopeTrees == EvacuateScopeTrees {
+ if !first {
+ sb.WriteString(";")
+ }
+ sb.WriteString("trees")
+ }
+ return sb.String()
+}
+
+func (s EvacuateScope) WithObjects() bool {
+ return s&EvacuateScopeObjects == EvacuateScopeObjects
+}
+
+func (s EvacuateScope) WithTrees() bool {
+ return s&EvacuateScopeTrees == EvacuateScopeTrees
+}
+
+func (s EvacuateScope) TreesOnly() bool {
+ return s == EvacuateScopeTrees
+}
+
// EvacuateShardPrm represents parameters for the EvacuateShard operation.
type EvacuateShardPrm struct {
- shardID []*shard.ID
- handler func(oid.Address, *objectSDK.Object) error
- ignoreErrors bool
+ ShardID []*shard.ID
+ ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error)
+ TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error)
+ IgnoreErrors bool
+ Scope EvacuateScope
+ RepOneOnly bool
+
+ ContainerWorkerCount uint32
+ ObjectWorkerCount uint32
}
// EvacuateShardRes represents result of the EvacuateShard operation.
type EvacuateShardRes struct {
- count int
+ objEvacuated *atomic.Uint64
+ objTotal *atomic.Uint64
+ objFailed *atomic.Uint64
+ objSkipped *atomic.Uint64
+
+ trEvacuated *atomic.Uint64
+ trTotal *atomic.Uint64
+ trFailed *atomic.Uint64
}
-// WithShardIDList sets shard ID.
-func (p *EvacuateShardPrm) WithShardIDList(id []*shard.ID) {
- p.shardID = id
+// NewEvacuateShardRes creates a new EvacuateShardRes instance.
+func NewEvacuateShardRes() *EvacuateShardRes {
+ return &EvacuateShardRes{
+ objEvacuated: new(atomic.Uint64),
+ objTotal: new(atomic.Uint64),
+ objFailed: new(atomic.Uint64),
+ objSkipped: new(atomic.Uint64),
+ trEvacuated: new(atomic.Uint64),
+ trTotal: new(atomic.Uint64),
+ trFailed: new(atomic.Uint64),
+ }
}
-// WithIgnoreErrors sets flag to ignore errors.
-func (p *EvacuateShardPrm) WithIgnoreErrors(ignore bool) {
- p.ignoreErrors = ignore
-}
-
-// WithFaultHandler sets handler to call for objects which cannot be saved on other shards.
-func (p *EvacuateShardPrm) WithFaultHandler(f func(oid.Address, *objectSDK.Object) error) {
- p.handler = f
-}
-
-// Count returns amount of evacuated objects.
+// ObjectsEvacuated returns the number of evacuated objects.
// Objects for which the handler returned no error are also assumed evacuated.
-func (p EvacuateShardRes) Count() int {
- return p.count
+func (p *EvacuateShardRes) ObjectsEvacuated() uint64 {
+ if p == nil {
+ return 0
+ }
+ return p.objEvacuated.Load()
}
-const defaultEvacuateBatchSize = 100
+// ObjectsTotal returns the total number of objects to evacuate.
+func (p *EvacuateShardRes) ObjectsTotal() uint64 {
+ if p == nil {
+ return 0
+ }
+ return p.objTotal.Load()
+}
-type pooledShard struct {
- hashedShard
- pool util.WorkerPool
+// ObjectsFailed returns the number of objects that failed to evacuate.
+func (p *EvacuateShardRes) ObjectsFailed() uint64 {
+ if p == nil {
+ return 0
+ }
+ return p.objFailed.Load()
+}
+
+// ObjectsSkipped returns the number of skipped objects.
+func (p *EvacuateShardRes) ObjectsSkipped() uint64 {
+ if p == nil {
+ return 0
+ }
+ return p.objSkipped.Load()
+}
+
+// TreesEvacuated returns the number of evacuated trees.
+func (p *EvacuateShardRes) TreesEvacuated() uint64 {
+ if p == nil {
+ return 0
+ }
+ return p.trEvacuated.Load()
+}
+
+// TreesTotal returns the total number of trees to evacuate.
+func (p *EvacuateShardRes) TreesTotal() uint64 {
+ if p == nil {
+ return 0
+ }
+ return p.trTotal.Load()
+}
+
+// TreesFailed returns the number of trees that failed to evacuate.
+func (p *EvacuateShardRes) TreesFailed() uint64 {
+ if p == nil {
+ return 0
+ }
+ return p.trFailed.Load()
+}
+
+// DeepCopy returns a deep copy of the result instance.
+func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
+ if p == nil {
+ return nil
+ }
+
+ res := &EvacuateShardRes{
+ objEvacuated: new(atomic.Uint64),
+ objTotal: new(atomic.Uint64),
+ objFailed: new(atomic.Uint64),
+ objSkipped: new(atomic.Uint64),
+ trEvacuated: new(atomic.Uint64),
+ trTotal: new(atomic.Uint64),
+ trFailed: new(atomic.Uint64),
+ }
+
+ res.objEvacuated.Store(p.objEvacuated.Load())
+ res.objTotal.Store(p.objTotal.Load())
+ res.objFailed.Store(p.objFailed.Load())
+ res.objSkipped.Store(p.objSkipped.Load())
+ res.trTotal.Store(p.trTotal.Load())
+ res.trEvacuated.Store(p.trEvacuated.Load())
+ res.trFailed.Store(p.trFailed.Load())
+ return res
}
var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
// Evacuate moves data from one shard to the others.
// The shard being moved must be in read-only mode.
-func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error) {
- sidList := make([]string, len(prm.shardID))
- for i := range prm.shardID {
- sidList[i] = prm.shardID[i].String()
+func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
}
+ shardIDs := make([]string, len(prm.ShardID))
+ for i := range prm.ShardID {
+ shardIDs[i] = prm.ShardID[i].String()
+ }
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate",
+ trace.WithAttributes(
+ attribute.StringSlice("shardIDs", shardIDs),
+ attribute.Bool("ignoreErrors", prm.IgnoreErrors),
+ attribute.Stringer("scope", prm.Scope),
+ ))
+ defer span.End()
+
+ shards, err := e.getActualShards(shardIDs, prm)
+ if err != nil {
+ return err
+ }
+
+ shardsToEvacuate := make(map[string]*shard.Shard)
+ for i := range shardIDs {
+ for j := range shards {
+ if shards[j].ID().String() == shardIDs[i] {
+ shardsToEvacuate[shardIDs[i]] = shards[j].Shard
+ }
+ }
+ }
+
+ res := NewEvacuateShardRes()
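+ // Evacuation continues in the background: detach the operation from the
+ // caller's cancellation and register it with the limiter before spawning
+ // the worker goroutine.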
+ ctx = context.WithoutCancel(ctx)
+ eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
+ if err != nil {
+ return err
+ }
+
+ var mtx sync.RWMutex
+ copyShards := func() []hashedShard {
+ mtx.RLock()
+ defer mtx.RUnlock()
+ return slices.Clone(shards)
+ }
+ eg.Go(func() error {
+ return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate)
+ })
+
+ return nil
+}
+
+func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+) error {
+ var err error
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
+ trace.WithAttributes(
+ attribute.StringSlice("shardIDs", shardIDs),
+ attribute.Bool("ignoreErrors", prm.IgnoreErrors),
+ attribute.Stringer("scope", prm.Scope),
+ attribute.Bool("repOneOnly", prm.RepOneOnly),
+ ))
+
+ defer func() {
+ span.End()
+ e.evacuateLimiter.Complete(err)
+ }()
+
+ e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.Stringer("scope", prm.Scope))
+
+ err = e.getTotals(ctx, prm, shardsToEvacuate, res)
+ if err != nil {
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
+ zap.Stringer("scope", prm.Scope))
+ return err
+ }
+
+ ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm)
+ continueLoop := true
+ for i := 0; continueLoop && i < len(shardIDs); i++ {
+ select {
+ case <-ctx.Done():
+ continueLoop = false
+ default:
+ egShard.Go(func() error {
+ err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
+ }
+ }
+ err = egShard.Wait()
+ if err != nil {
+ err = fmt.Errorf("shard error: %w", err)
+ }
+ errContainer := egContainer.Wait()
+ errObject := egObject.Wait()
+ if errContainer != nil {
+ err = errors.Join(err, fmt.Errorf("container error: %w", errContainer))
+ }
+ if errObject != nil {
+ err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
+ }
+ if err != nil {
+ e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.Stringer("scope", prm.Scope))
+ return err
+ }
+
+ e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation,
+ zap.Strings("shard_ids", shardIDs),
+ evacuationOperationLogField,
+ zap.Uint64("total_objects", res.ObjectsTotal()),
+ zap.Uint64("evacuated_objects", res.ObjectsEvacuated()),
+ zap.Uint64("failed_objects", res.ObjectsFailed()),
+ zap.Uint64("skipped_objects", res.ObjectsSkipped()),
+ zap.Uint64("total_trees", res.TreesTotal()),
+ zap.Uint64("evacuated_trees", res.TreesEvacuated()),
+ zap.Uint64("failed_trees", res.TreesFailed()),
+ )
+ return nil
+}
+
+func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) (
+ context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group,
+) {
+ operationCtx, cancel := context.WithCancelCause(ctx)
+ egObject, _ := errgroup.WithContext(operationCtx)
+ objectWorkerCount := prm.ObjectWorkerCount
+ if objectWorkerCount == 0 {
+ objectWorkerCount = objectWorkerCountDefault
+ }
+ egObject.SetLimit(int(objectWorkerCount))
+ egContainer, _ := errgroup.WithContext(operationCtx)
+ containerWorkerCount := prm.ContainerWorkerCount
+ if containerWorkerCount == 0 {
+ containerWorkerCount = containerWorkerCountDefault
+ }
+ egContainer.SetLimit(int(containerWorkerCount))
+ egShard, _ := errgroup.WithContext(operationCtx)
+
+ return operationCtx, cancel, egShard, egContainer, egObject
+}
+
+func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals")
+ defer span.End()
+
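+ // Collect object and tree totals up front so that evacuation progress
+ // can be reported via GetEvacuationState while the operation is running.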
+ for _, sh := range shardsToEvacuate {
+ if prm.Scope.WithObjects() {
+ cnt, err := sh.LogicalObjectsCount(ctx)
+ if err != nil {
+ if errors.Is(err, shard.ErrDegradedMode) {
+ continue
+ }
+ return err
+ }
+ res.objTotal.Add(cnt)
+ }
+ if prm.Scope.WithTrees() && sh.PiloramaEnabled() {
+ cnt, err := pilorama.TreeCountAll(ctx, sh)
+ if err != nil {
+ return err
+ }
+ res.trTotal.Add(cnt)
+ }
+ }
+ return nil
+}
+
+func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
+) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
+ trace.WithAttributes(
+ attribute.String("shardID", shardID),
+ ))
+ defer span.End()
+
+ if prm.Scope.WithObjects() {
+ if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil {
+ return err
+ }
+ }
+ if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() {
+ if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
+) error {
+ sh := shardsToEvacuate[shardID]
+ var cntPrm shard.IterateOverContainersPrm
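+ // For every container on the shard spawn a task: containers that were
+ // deleted or, with RepOneOnly, keep more than one copy are counted as
+ // skipped; the objects of all other containers are evacuated.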
+ cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egContainer.Go(func() error {
+ var skip bool
+ c, err := e.containerSource.Load().cs.Get(ctx, cnt)
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ skip = true
+ } else {
+ return err
+ }
+ }
+ if !skip && prm.RepOneOnly {
+ skip = e.isNotRepOne(c)
+ }
+ if skip {
+ countPrm := shard.CountAliveObjectsInContainerPrm{
+ ObjectType: objType,
+ ContainerID: cnt,
+ }
+ count, err := sh.CountAliveObjectsInContainer(ctx, countPrm)
+ if err != nil {
+ return err
+ }
+ res.objSkipped.Add(count)
+ return nil
+ }
+ var objPrm shard.IterateOverObjectsInContainerPrm
+ objPrm.ObjectType = objType
+ objPrm.ContainerID = cnt
+ objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egObject.Go(func() error {
+ err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
+ return nil
+ }
+ err = sh.IterateOverObjectsInContainer(ctx, objPrm)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
+ return nil
+ }
+
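+ // Mark the shard as evacuating: regular reads stop serving its objects,
+ // though they stay on disk and remain readable with SkipEvacCheck.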
+ sh.SetEvacuationInProgress(true)
+ err := sh.IterateOverContainers(ctx, cntPrm)
+ if err != nil {
+ cancel(err)
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField)
+ }
+ return err
+}
+
+func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+) error {
+ sh := shardsToEvacuate[shardID]
+ shards := getShards()
+
+ var listPrm pilorama.TreeListTreesPrm
+ first := true
+
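+ // Page through the shard's tree listing; an empty NextPageToken after the
+ // first iteration means the listing is exhausted.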
+ for len(listPrm.NextPageToken) > 0 || first {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ first = false
+
+ listRes, err := sh.TreeListTrees(ctx, listPrm)
+ if err != nil {
+ return err
+ }
+ listPrm.NextPageToken = listRes.NextPageToken
+ if err := e.evacuateTrees(ctx, sh, listRes.Items, prm, res, shards, shardsToEvacuate); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID,
+ prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees",
+ trace.WithAttributes(
+ attribute.Int("trees_count", len(trees)),
+ ))
+ defer span.End()
+
+ for _, contTree := range trees {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ success, shardID, err := e.tryEvacuateTreeLocal(ctx, sh, contTree, prm, shards, shardsToEvacuate)
+ if err != nil {
+ return err
+ }
+ if success {
+ e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal,
+ zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
+ zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID),
+ evacuationOperationLogField)
+ res.trEvacuated.Add(1)
+ continue
+ }
+
+ moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm)
+ if err != nil {
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
+ zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
+ zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
+ zap.Error(err))
+ return err
+ }
+ if moved {
+ e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote,
+ zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID),
+ zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK),
+ evacuationOperationLogField)
+ res.trEvacuated.Add(1)
+ } else if prm.IgnoreErrors {
+ res.trFailed.Add(1)
+ e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
+ zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
+ zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
+ zap.Error(err))
+ } else {
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
+ zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
+ zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
+ zap.Error(err))
+ return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID)
+ }
+ }
+ return nil
+}
+
+func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
+ if prm.TreeHandler == nil {
+ return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
+ }
+
+ return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)
+}
+
+func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID,
+ prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+) (bool, string, error) {
+ target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate)
+ if err != nil {
+ return false, "", err
+ }
+ if !found {
+ return false, "", nil
+ }
+ const readBatchSize = 1000
+ source := make(chan *pilorama.Move, readBatchSize)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ var wg sync.WaitGroup
+
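+ // Apply operations on the target shard in a separate goroutine while the
+ // loop below reads the source op log and feeds it into the channel.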
+ wg.Add(1)
+ var applyErr error
+ go func() {
+ defer wg.Done()
+
+ applyErr = target.TreeApplyStream(ctx, tree.CID, tree.TreeID, source)
+ if applyErr != nil {
+ cancel()
+ }
+ }()
+
+ var height uint64
+ for {
+ op, err := sh.TreeGetOpLog(ctx, tree.CID, tree.TreeID, height)
+ if err != nil {
+ cancel()
+ wg.Wait()
+ close(source) // close after cancel so that ctx.Done() is observed first
+ if prm.IgnoreErrors {
+ return false, "", nil
+ }
+ return false, "", err
+ }
+
+ if op.Time == 0 { // op log is fully read
+ close(source)
+ wg.Wait()
+ if applyErr == nil {
+ return true, target.ID().String(), nil
+ }
+ if prm.IgnoreErrors {
+ return false, "", nil
+ }
+ return false, "", applyErr
+ }
+
+ select {
+ case <-ctx.Done(): // apply stream failed or operation cancelled
+ wg.Wait()
+ if prm.IgnoreErrors {
+ return false, "", nil
+ }
+ if applyErr != nil {
+ return false, "", applyErr
+ }
+ return false, "", ctx.Err()
+ case source <- &op:
+ }
+
+ height = op.Time + 1
+ }
+}
+
+// findShardToEvacuateTree returns the first suitable shard in HRW order, preferring a shard on which the tree already exists.
+func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
+ shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+) (hashedShard, bool, error) {
+ hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
+ var result hashedShard
+ var found bool
+ for _, target := range shards {
+ select {
+ case <-ctx.Done():
+ return hashedShard{}, false, ctx.Err()
+ default:
+ }
+
+ if _, ok := shardsToEvacuate[target.ID().String()]; ok {
+ continue
+ }
+
+ if !target.PiloramaEnabled() || target.GetMode().ReadOnly() {
+ continue
+ }
+
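+ // Remember the first eligible shard as a fallback in case no other
+ // shard already has this tree.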
+ if !found {
+ result = target
+ found = true
+ }
+
+ exists, err := target.TreeExists(ctx, tree.CID, tree.TreeID)
+ if err != nil {
+ continue
+ }
+ if exists {
+ return target, true, nil
+ }
+ }
+ return result, found, nil
+}
+
+func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) {
e.mtx.RLock()
- for i := range sidList {
- sh, ok := e.shards[sidList[i]]
+ defer e.mtx.RUnlock()
+
+ for i := range shardIDs {
+ sh, ok := e.shards[shardIDs[i]]
if !ok {
- e.mtx.RUnlock()
- return EvacuateShardRes{}, errShardNotFound
+ return nil, errShardNotFound
}
if !sh.GetMode().ReadOnly() {
- e.mtx.RUnlock()
- return EvacuateShardRes{}, shard.ErrMustBeReadOnly
+ return nil, ErrMustBeReadOnly
+ }
+
+ if prm.Scope.TreesOnly() && !sh.PiloramaEnabled() {
+ return nil, fmt.Errorf("shard %s doesn't have pilorama enabled", sh.ID())
}
}
- if len(e.shards)-len(sidList) < 1 && prm.handler == nil {
- e.mtx.RUnlock()
- return EvacuateShardRes{}, errMustHaveTwoShards
+ if len(e.shards)-len(shardIDs) < 1 && prm.ObjectsHandler == nil && prm.Scope.WithObjects() {
+ return nil, errMustHaveTwoShards
}
- e.log.Info("started shards evacuation", zap.Strings("shard_ids", sidList))
+ if len(e.shards)-len(shardIDs) < 1 && prm.TreeHandler == nil && prm.Scope.WithTrees() {
+ return nil, errMustHaveTwoShards
+ }
// We must have all shards, to have correct information about their
// indexes in a sorted slice and set appropriate marks in the metabase.
// Evacuated shard is skipped during put.
- shards := make([]pooledShard, 0, len(e.shards))
+ shards := make([]hashedShard, 0, len(e.shards))
for id := range e.shards {
- shards = append(shards, pooledShard{
- hashedShard: hashedShard(e.shards[id]),
- pool: e.shardPools[id],
- })
+ shards = append(shards, e.shards[id])
+ }
+ return shards, nil
+}
+
+func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
+ getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
+) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
+ defer span.End()
+
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+
+ shards := getShards()
+ addr := objInfo.Address
+
+ var getPrm shard.GetPrm
+ getPrm.SetAddress(addr)
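+ // The source shard is already marked as evacuating, so skip the
+ // evacuation check to be able to read the object from it.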
+ getPrm.SkipEvacCheck(true)
+
+ getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm)
+ if err != nil {
+ if prm.IgnoreErrors {
+ res.objFailed.Add(1)
+ return nil
+ }
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
+ return err
+ }
+
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr)
+ if err != nil {
+ return err
+ }
+
+ if evacuatedLocal {
+ return nil
+ }
+
+ if prm.ObjectsHandler == nil {
+ // Do not check the ignoreErrors flag here because ignoring
+ // errors on put would make this command useless.
+ return fmt.Errorf("%w: %s", errPutShard, objInfo)
+ }
+
+ moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
+ if err != nil {
+ e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
+ return err
+ }
+ if moved {
+ res.objEvacuated.Add(1)
+ } else if prm.IgnoreErrors {
+ res.objFailed.Add(1)
+ e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
+ } else {
+ return fmt.Errorf("object %s was not replicated", addr)
+ }
+ return nil
+}
+
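+// isNotRepOne reports whether any replica descriptor of the container's
+// placement policy requires more than one object copy.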
+func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
+ p := c.Value.PlacementPolicy()
+ for i := range p.NumberOfReplicas() {
+ if p.ReplicaDescriptor(i).NumberOfObjects() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
+ shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
+) (bool, error) {
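+ // Try target shards in HRW order determined by the object address,
+ // excluding the shards that are being evacuated themselves.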
+ hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
+ for j := range shards {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ default:
+ }
+
+ if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
+ continue
+ }
+ switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status {
+ case putToShardSuccess:
+ res.objEvacuated.Add(1)
+ e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
+ zap.Stringer("from", sh.ID()),
+ zap.Stringer("to", shards[j].ID()),
+ zap.Stringer("addr", addr),
+ evacuationOperationLogField)
+ return true, nil
+ case putToShardExists, putToShardRemoved:
+ res.objSkipped.Add(1)
+ return true, nil
+ default:
+ continue
+ }
+ }
+
+ return false, nil
+}
+
+func (e *StorageEngine) GetEvacuationState(ctx context.Context) (*EvacuationState, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ return e.evacuateLimiter.GetState(), nil
+}
+
+func (e *StorageEngine) EnqueRunningEvacuationStop(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ return e.evacuateLimiter.CancelIfRunning()
+}
+
+func (e *StorageEngine) ResetEvacuationStatus(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ return e.evacuateLimiter.ResetEvacuationStatus()
+}
+
+func (e *StorageEngine) ResetEvacuationStatusForShards() {
+ e.mtx.RLock()
+ defer e.mtx.RUnlock()
+ for _, sh := range e.shards {
+ sh.SetEvacuationInProgress(false)
}
- e.mtx.RUnlock()
-
- weights := make([]float64, 0, len(shards))
- for i := range shards {
- weights = append(weights, e.shardWeight(shards[i].Shard))
- }
-
- shardMap := make(map[string]*shard.Shard)
- for i := range sidList {
- for j := range shards {
- if shards[j].ID().String() == sidList[i] {
- shardMap[sidList[i]] = shards[j].Shard
- }
- }
- }
-
- var listPrm shard.ListWithCursorPrm
- listPrm.WithCount(defaultEvacuateBatchSize)
-
- var res EvacuateShardRes
-
-mainLoop:
- for n := range sidList {
- sh := shardMap[sidList[n]]
-
- var c *meta.Cursor
- for {
- listPrm.WithCursor(c)
-
- // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
- // because ListWithCursor works only with the metabase.
- listRes, err := sh.ListWithCursor(listPrm)
- if err != nil {
- if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
- continue mainLoop
- }
- return res, err
- }
-
- // TODO (@fyrchik): #1731 parallelize the loop
- lst := listRes.AddressList()
-
- loop:
- for i := range lst {
- addr := lst[i].Address
-
- var getPrm shard.GetPrm
- getPrm.SetAddress(addr)
-
- getRes, err := sh.Get(getPrm)
- if err != nil {
- if prm.ignoreErrors {
- continue
- }
- return res, err
- }
-
- hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(addr.EncodeToString())))
- for j := range shards {
- if _, ok := shardMap[shards[j].ID().String()]; ok {
- continue
- }
- putDone, exists := e.putToShard(shards[j].hashedShard, j, shards[j].pool, addr, getRes.Object())
- if putDone || exists {
- if putDone {
- e.log.Debug("object is moved to another shard",
- zap.String("from", sidList[n]),
- zap.Stringer("to", shards[j].ID()),
- zap.Stringer("addr", addr))
-
- res.count++
- }
- continue loop
- }
- }
-
- if prm.handler == nil {
- // Do not check ignoreErrors flag here because
- // ignoring errors on put make this command kinda useless.
- return res, fmt.Errorf("%w: %s", errPutShard, lst[i])
- }
-
- err = prm.handler(addr, getRes.Object())
- if err != nil {
- return res, err
- }
- res.count++
- }
-
- c = listRes.Cursor()
- }
- }
-
- e.log.Info("finished shards evacuation",
- zap.Strings("shard_ids", sidList))
- return res, nil
}
diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go
new file mode 100644
index 0000000000..b75e8686d8
--- /dev/null
+++ b/pkg/local_object_storage/engine/evacuate_limiter.go
@@ -0,0 +1,223 @@
+package engine
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "sync"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "golang.org/x/sync/errgroup"
+)
+
+type EvacuateProcessState int
+
+const (
+ EvacuateProcessStateUndefined EvacuateProcessState = iota
+ EvacuateProcessStateRunning
+ EvacuateProcessStateCompleted
+)
+
+type EvacuationState struct {
+ shardIDs []string
+ processState EvacuateProcessState
+ startedAt time.Time
+ finishedAt time.Time
+ result *EvacuateShardRes
+ errMessage string
+}
+
+func (s *EvacuationState) ShardIDs() []string {
+ if s == nil {
+ return nil
+ }
+ return s.shardIDs
+}
+
+func (s *EvacuationState) ObjectsEvacuated() uint64 {
+ if s == nil {
+ return 0
+ }
+ return s.result.ObjectsEvacuated()
+}
+
+func (s *EvacuationState) ObjectsTotal() uint64 {
+ if s == nil {
+ return 0
+ }
+ return s.result.ObjectsTotal()
+}
+
+func (s *EvacuationState) ObjectsFailed() uint64 {
+ if s == nil {
+ return 0
+ }
+ return s.result.ObjectsFailed()
+}
+
+func (s *EvacuationState) ObjectsSkipped() uint64 {
+ if s == nil {
+ return 0
+ }
+ return s.result.ObjectsSkipped()
+}
+
+func (s *EvacuationState) TreesEvacuated() uint64 {
+ if s == nil {
+ return 0
+ }
+ return s.result.TreesEvacuated()
+}
+
+func (s *EvacuationState) TreesTotal() uint64 {
+ if s == nil {
+ return 0
+ }
+ return s.result.TreesTotal()
+}
+
+func (s *EvacuationState) TreesFailed() uint64 {
+ if s == nil {
+ return 0
+ }
+ return s.result.TreesFailed()
+}
+
+func (s *EvacuationState) ProcessingStatus() EvacuateProcessState {
+ if s == nil {
+ return EvacuateProcessStateUndefined
+ }
+ return s.processState
+}
+
+func (s *EvacuationState) StartedAt() *time.Time {
+ if s == nil {
+ return nil
+ }
+ if s.startedAt.IsZero() {
+ return nil
+ }
+ return &s.startedAt
+}
+
+func (s *EvacuationState) FinishedAt() *time.Time {
+ if s == nil {
+ return nil
+ }
+ if s.finishedAt.IsZero() {
+ return nil
+ }
+ return &s.finishedAt
+}
+
+func (s *EvacuationState) ErrorMessage() string {
+ if s == nil {
+ return ""
+ }
+ return s.errMessage
+}
+
+func (s *EvacuationState) DeepCopy() *EvacuationState {
+ if s == nil {
+ return nil
+ }
+ shardIDs := slices.Clone(s.shardIDs)
+
+ return &EvacuationState{
+ shardIDs: shardIDs,
+ processState: s.processState,
+ startedAt: s.startedAt,
+ finishedAt: s.finishedAt,
+ errMessage: s.errMessage,
+ result: s.result.DeepCopy(),
+ }
+}
+
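+// evacuationLimiter allows at most one evacuation to run at a time and
+// keeps the state of the most recent run.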
+type evacuationLimiter struct {
+ state EvacuationState
+ eg *errgroup.Group
+ cancel context.CancelFunc
+
+ guard *sync.RWMutex
+ statusCond *sync.Cond // used in unit tests
+}
+
+func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) {
+ l.guard.Lock()
+ defer l.guard.Unlock()
+
+ select {
+ case <-ctx.Done():
+ return nil, nil, ctx.Err()
+ default:
+ }
+
+ if l.state.processState == EvacuateProcessStateRunning {
+ return nil, nil, logicerr.New(fmt.Sprintf("evacuate is already running for shard ids %v", l.state.shardIDs))
+ }
+
+ var egCtx context.Context
+ egCtx, l.cancel = context.WithCancel(ctx)
+ l.eg, egCtx = errgroup.WithContext(egCtx)
+ l.state = EvacuationState{
+ shardIDs: shardIDs,
+ processState: EvacuateProcessStateRunning,
+ startedAt: time.Now().UTC(),
+ result: result,
+ }
+ l.statusCond.Broadcast()
+
+ return l.eg, egCtx, nil
+}
+
+func (l *evacuationLimiter) Complete(err error) {
+ l.guard.Lock()
+ defer l.guard.Unlock()
+
+ errMsg := ""
+ if err != nil {
+ errMsg = err.Error()
+ }
+ l.state.processState = EvacuateProcessStateCompleted
+ l.state.errMessage = errMsg
+ l.state.finishedAt = time.Now().UTC()
+ l.statusCond.Broadcast()
+
+ l.eg = nil
+}
+
+func (l *evacuationLimiter) GetState() *EvacuationState {
+ l.guard.RLock()
+ defer l.guard.RUnlock()
+
+ return l.state.DeepCopy()
+}
+
+func (l *evacuationLimiter) CancelIfRunning() error {
+ l.guard.Lock()
+ defer l.guard.Unlock()
+
+ if l.state.processState != EvacuateProcessStateRunning {
+ return logicerr.New("there is no running evacuation task")
+ }
+
+ l.cancel()
+ return nil
+}
+
+func (l *evacuationLimiter) ResetEvacuationStatus() error {
+ l.guard.Lock()
+ defer l.guard.Unlock()
+
+ if l.state.processState == EvacuateProcessStateRunning {
+ return logicerr.New("there is running evacuation task")
+ }
+
+ l.state = EvacuationState{}
+ l.eg = nil
+ l.cancel = nil
+ l.statusCond.Broadcast()
+
+ return nil
+}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index a70a70dc4c..f2ba7d994a 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -1,91 +1,125 @@
package engine
import (
+ "context"
"errors"
"fmt"
- "os"
"path/filepath"
"strconv"
+ "sync"
+ "sync/atomic"
"testing"
+ "time"
+ coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
+ "golang.org/x/sync/errgroup"
)
-func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
- dir, err := os.MkdirTemp("", "*")
- require.NoError(t, err)
- t.Cleanup(func() { _ = os.RemoveAll(dir) })
+type containerStorage struct {
+ cntmap map[cid.ID]*container.Container
+ latency time.Duration
+}
- e := New(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- WithShardPoolSize(1))
-
- ids := make([]*shard.ID, shardNum)
-
- for i := range ids {
- ids[i], err = e.AddShard(
- shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- shard.WithBlobStorOptions(
- blobstor.WithStorages([]blobstor.SubStorage{{
- Storage: fstree.New(
- fstree.WithPath(filepath.Join(dir, strconv.Itoa(i))),
- fstree.WithDepth(1)),
- }})),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ))
- require.NoError(t, err)
+func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) {
+ time.Sleep(cs.latency)
+ v, ok := cs.cntmap[id]
+ if !ok {
+ return nil, new(apistatus.ContainerNotFound)
}
- require.NoError(t, e.Open())
- require.NoError(t, e.Init())
+ coreCnt := coreContainer.Container{
+ Value: *v,
+ }
+ return &coreCnt, nil
+}
+
+func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
+ return nil, nil
+}
+
+func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
+ dir := t.TempDir()
+
+ te := testNewEngine(t).
+ setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t)),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages([]blobstor.SubStorage{{
+ Storage: fstree.New(
+ fstree.WithPath(filepath.Join(dir, strconv.Itoa(id))),
+ fstree.WithDepth(1)),
+ }})),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
+ meta.WithPermissions(0o700),
+ meta.WithEpochState(epochState{})),
+ shard.WithPiloramaOptions(
+ pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
+ pilorama.WithPerm(0o700),
+ ),
+ }
+ }).
+ prepare(t)
+ e, ids := te.engine, te.shardIDs
objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
-
- for _, sh := range ids {
- obj := generateObjectWithCID(t, cidtest.ID())
- objects = append(objects, obj)
-
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- _, err := e.shards[sh.String()].Put(putPrm)
- require.NoError(t, err)
+ treeID := "version"
+ meta := []pilorama.KeyValue{
+ {Key: pilorama.AttributeVersion, Value: []byte("XXX")},
+ {Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
}
+ cnrMap := make(map[cid.ID]*container.Container)
+ for _, sh := range ids {
+ for i := range objPerShard {
+ // Create dummy container
+ cnr1 := container.Container{}
+ cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i))
+ contID := cidtest.ID()
+ cnrMap[contID] = &cnr1
- for i := 0; ; i++ {
- objects = append(objects, generateObjectWithCID(t, cidtest.ID()))
+ obj := testutil.GenerateObjectWithCID(contID)
+ objects = append(objects, obj)
- var putPrm PutPrm
- putPrm.WithObject(objects[len(objects)-1])
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
- _, err := e.Put(putPrm)
- require.NoError(t, err)
-
- res, err := e.shards[ids[len(ids)-1].String()].List()
- require.NoError(t, err)
- if len(res.AddressList()) == objPerShard {
- break
+ _, err = e.shards[sh.String()].TreeAddByPath(context.Background(), pilorama.CIDDescriptor{CID: contID, Position: 0, Size: 1},
+ treeID, pilorama.AttributeFilename, []string{"path", "to", "the", "file"}, meta)
+ require.NoError(t, err)
}
}
+ e.SetContainerSource(&containerStorage{cntmap: cnrMap})
return e, ids, objects
}
-func TestEvacuateShard(t *testing.T) {
+func TestEvacuateShardObjects(t *testing.T) {
+ t.Parallel()
+
const objPerShard = 3
e, ids, objects := newEngineEvacuate(t, 3, objPerShard)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
evacuateShardID := ids[2].String()
@@ -94,7 +128,7 @@ func TestEvacuateShard(t *testing.T) {
var prm GetPrm
prm.WithAddress(objectCore.AddressOf(objects[i]))
- _, err := e.Get(prm)
+ _, err := e.Get(context.Background(), prm)
require.NoError(t, err)
}
}
@@ -102,19 +136,21 @@ func TestEvacuateShard(t *testing.T) {
checkHasObjects(t)
var prm EvacuateShardPrm
- prm.WithShardIDList(ids[2:3])
+ prm.ShardID = ids[2:3]
+ prm.Scope = EvacuateScopeObjects
t.Run("must be read-only", func(t *testing.T) {
- res, err := e.Evacuate(prm)
- require.ErrorIs(t, err, shard.ErrMustBeReadOnly)
- require.Equal(t, 0, res.Count())
+ err := e.Evacuate(context.Background(), prm)
+ require.ErrorIs(t, err, ErrMustBeReadOnly)
})
- require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
- res, err := e.Evacuate(prm)
+ err := e.Evacuate(context.Background(), prm)
require.NoError(t, err)
- require.Equal(t, objPerShard, res.count)
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, "", st.ErrorMessage())
+ require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated())
// We check that all objects are available both before and after shard removal.
// First case is a real-world use case. It ensures that an object can be put in presence
@@ -122,36 +158,79 @@ func TestEvacuateShard(t *testing.T) {
// Second case ensures that all objects are indeed moved and available.
checkHasObjects(t)
+ // Objects on evacuated shards should be logically unavailable, but persisted on disk.
+ // This is necessary to prevent the policer from removing them in the case of a `REP 1` policy.
+ for _, obj := range objects[len(objects)-objPerShard:] {
+ var prmGet shard.GetPrm
+ prmGet.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
+ require.Error(t, err)
+
+ prmGet.SkipEvacCheck(true)
+ _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
+ require.NoError(t, err)
+
+ var prmHead shard.HeadPrm
+ prmHead.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].Head(context.Background(), prmHead)
+ require.Error(t, err)
+
+ var existsPrm shard.ExistsPrm
+ existsPrm.Address = objectCore.AddressOf(obj)
+ _, err = e.shards[evacuateShardID].Exists(context.Background(), existsPrm)
+ require.Error(t, err)
+
+ var rngPrm shard.RngPrm
+ rngPrm.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].GetRange(context.Background(), rngPrm)
+ require.Error(t, err)
+ }
+
// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
- res, err = e.Evacuate(prm)
- require.NoError(t, err)
- require.Equal(t, 0, res.count)
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st = testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, "", st.ErrorMessage())
+ require.Equal(t, uint64(0), st.ObjectsEvacuated())
checkHasObjects(t)
e.mtx.Lock()
delete(e.shards, evacuateShardID)
- delete(e.shardPools, evacuateShardID)
e.mtx.Unlock()
checkHasObjects(t)
}
-func TestEvacuateNetwork(t *testing.T) {
- var errReplication = errors.New("handler error")
+func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState {
+ e.evacuateLimiter.waitForCompleted()
+ st, err := e.GetEvacuationState(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
+ return st
+}
- acceptOneOf := func(objects []*objectSDK.Object, max int) func(oid.Address, *objectSDK.Object) error {
- var n int
- return func(addr oid.Address, obj *objectSDK.Object) error {
- if n == max {
- return errReplication
+func TestEvacuateObjectsNetwork(t *testing.T) {
+ t.Parallel()
+
+ errReplication := errors.New("handler error")
+
+ acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
+ var n atomic.Uint64
+ var mtx sync.Mutex
+ return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ mtx.Lock()
+ defer mtx.Unlock()
+ if n.Load() == max {
+ return false, errReplication
}
- n++
+ n.Add(1)
for i := range objects {
if addr == objectCore.AddressOf(objects[i]) {
require.Equal(t, objects[i], obj)
- return nil
+ return true, nil
}
}
require.FailNow(t, "handler was called with an unexpected object: %s", addr)
@@ -160,76 +239,589 @@ func TestEvacuateNetwork(t *testing.T) {
}
t.Run("single shard", func(t *testing.T) {
+ t.Parallel()
e, ids, objects := newEngineEvacuate(t, 1, 3)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
evacuateShardID := ids[0].String()
- require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
var prm EvacuateShardPrm
- prm.shardID = ids[0:1]
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
- res, err := e.Evacuate(prm)
+ err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errMustHaveTwoShards)
- require.Equal(t, 0, res.Count())
- prm.handler = acceptOneOf(objects, 2)
+ prm.ObjectsHandler = acceptOneOf(objects, 2)
- res, err = e.Evacuate(prm)
- require.ErrorIs(t, err, errReplication)
- require.Equal(t, 2, res.Count())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), errReplication.Error())
+ require.Equal(t, uint64(2), st.ObjectsEvacuated())
})
t.Run("multiple shards, evacuate one", func(t *testing.T) {
+ t.Parallel()
e, ids, objects := newEngineEvacuate(t, 2, 3)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
- require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
var prm EvacuateShardPrm
- prm.shardID = ids[1:2]
- prm.handler = acceptOneOf(objects, 2)
+ prm.ShardID = ids[1:2]
+ prm.ObjectsHandler = acceptOneOf(objects, 2)
+ prm.Scope = EvacuateScopeObjects
- res, err := e.Evacuate(prm)
- require.ErrorIs(t, err, errReplication)
- require.Equal(t, 2, res.Count())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), errReplication.Error())
+ require.Equal(t, uint64(2), st.ObjectsEvacuated())
t.Run("no errors", func(t *testing.T) {
- prm.handler = acceptOneOf(objects, 3)
+ prm.ObjectsHandler = acceptOneOf(objects, 3)
- res, err := e.Evacuate(prm)
- require.NoError(t, err)
- require.Equal(t, 3, res.Count())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, "", st.ErrorMessage())
+ require.Equal(t, uint64(3), st.ObjectsEvacuated())
})
})
t.Run("multiple shards, evacuate many", func(t *testing.T) {
+ t.Parallel()
e, ids, objects := newEngineEvacuate(t, 4, 5)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
evacuateIDs := ids[0:3]
- var totalCount int
+ var totalCount uint64
for i := range evacuateIDs {
- res, err := e.shards[ids[i].String()].List()
+ res, err := e.shards[ids[i].String()].List(context.Background())
require.NoError(t, err)
- totalCount += len(res.AddressList())
+ totalCount += uint64(len(res.AddressList()))
}
for i := range ids {
- require.NoError(t, e.shards[ids[i].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly))
}
var prm EvacuateShardPrm
- prm.shardID = evacuateIDs
- prm.handler = acceptOneOf(objects, totalCount-1)
+ prm.ShardID = evacuateIDs
+ prm.ObjectsHandler = acceptOneOf(objects, totalCount-1)
+ prm.Scope = EvacuateScopeObjects
- res, err := e.Evacuate(prm)
- require.ErrorIs(t, err, errReplication)
- require.Equal(t, totalCount-1, res.Count())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), errReplication.Error())
+ require.Equal(t, totalCount-1, st.ObjectsEvacuated())
t.Run("no errors", func(t *testing.T) {
- prm.handler = acceptOneOf(objects, totalCount)
+ prm.ObjectsHandler = acceptOneOf(objects, totalCount)
- res, err := e.Evacuate(prm)
- require.NoError(t, err)
- require.Equal(t, totalCount, res.Count())
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, "", st.ErrorMessage())
+ require.Equal(t, totalCount, st.ObjectsEvacuated())
})
})
}
+
+func TestEvacuateCancellation(t *testing.T) {
+ t.Parallel()
+ e, ids, _ := newEngineEvacuate(t, 2, 3)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[1:2]
+ prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ default:
+ }
+ return true, nil
+ }
+ prm.Scope = EvacuateScopeObjects
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ err := e.Evacuate(ctx, prm)
+ require.ErrorContains(t, err, "context canceled")
+}
+
+func TestEvacuateCancellationByError(t *testing.T) {
+ t.Parallel()
+ e, ids, _ := newEngineEvacuate(t, 2, 10)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[1:2]
+ var once atomic.Bool
+ prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ var err error
+ flag := true
+ if once.CompareAndSwap(false, true) {
+ err = errors.New("test error")
+ flag = false
+ }
+ return flag, err
+ }
+ prm.Scope = EvacuateScopeObjects
+ prm.ObjectWorkerCount = 2
+ prm.ContainerWorkerCount = 2
+
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Contains(t, st.ErrorMessage(), "test error")
+}
+
+func TestEvacuateSingleProcess(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 3)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ blocker := make(chan any)
+ running := make(chan any)
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[1:2]
+ prm.Scope = EvacuateScopeObjects
+ prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ select {
+ case <-running:
+ default:
+ close(running)
+ }
+ <-blocker
+ return true, nil
+ }
+
+ eg, egCtx := errgroup.WithContext(context.Background())
+ eg.Go(func() error {
+ require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
+ return nil
+ })
+ eg.Go(func() error {
+ <-running
+ require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed")
+ close(blocker)
+ return nil
+ })
+ require.NoError(t, eg.Wait())
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, uint64(3), st.ObjectsEvacuated())
+ require.Equal(t, "", st.ErrorMessage())
+}
+
+func TestEvacuateObjectsAsync(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 3)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ blocker := make(chan any)
+ running := make(chan any)
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[1:2]
+ prm.Scope = EvacuateScopeObjects
+ prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ select {
+ case <-running:
+ default:
+ close(running)
+ }
+ <-blocker
+ return true, nil
+ }
+
+ st, err := e.GetEvacuationState(context.Background())
+ require.NoError(t, err, "get init state failed")
+ require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
+ require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid init count")
+ require.Nil(t, st.StartedAt(), "invalid init started at")
+ require.Nil(t, st.FinishedAt(), "invalid init finished at")
+ require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
+ require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
+
+ eg, egCtx := errgroup.WithContext(context.Background())
+ eg.Go(func() error {
+ require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
+ return nil
+ })
+
+ <-running
+
+ st, err = e.GetEvacuationState(context.Background())
+ require.NoError(t, err, "get running state failed")
+ require.Equal(t, EvacuateProcessStateRunning, st.ProcessingStatus(), "invalid running state")
+ require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid running count")
+ require.NotNil(t, st.StartedAt(), "invalid running started at")
+ require.Nil(t, st.FinishedAt(), "invalid init finished at")
+ expectedShardIDs := make([]string, 0, 2)
+ for _, id := range ids[1:2] {
+ expectedShardIDs = append(expectedShardIDs, id.String())
+ }
+ require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid running shard ids")
+ require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
+
+ require.Error(t, e.ResetEvacuationStatus(context.Background()))
+
+ close(blocker)
+
+ st = testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
+ require.NotNil(t, st.StartedAt(), "invalid final started at")
+ require.NotNil(t, st.FinishedAt(), "invalid final finished at")
+ require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
+ require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
+
+ require.NoError(t, eg.Wait())
+
+ require.NoError(t, e.ResetEvacuationStatus(context.Background()))
+ st, err = e.GetEvacuationState(context.Background())
+ require.NoError(t, err, "get state after reset failed")
+ require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid state after reset")
+ require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid count after reset")
+ require.Nil(t, st.StartedAt(), "invalid started at after reset")
+ require.Nil(t, st.FinishedAt(), "invalid finished at after reset")
+ require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid shard ids after reset")
+ require.Equal(t, "", st.ErrorMessage(), "invalid error message after reset")
+}
+
+func TestEvacuateTreesLocal(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 3)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeTrees
+
+ expectedShardIDs := make([]string, 0, 1)
+ for _, id := range ids[0:1] {
+ expectedShardIDs = append(expectedShardIDs, id.String())
+ }
+
+ st, err := e.GetEvacuationState(context.Background())
+ require.NoError(t, err, "get init state failed")
+ require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
+ require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
+ require.Nil(t, st.StartedAt(), "invalid init started at")
+ require.Nil(t, st.FinishedAt(), "invalid init finished at")
+ require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
+ require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
+
+ require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
+
+ st = testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count")
+ require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count")
+ require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
+ require.NotNil(t, st.StartedAt(), "invalid final started at")
+ require.NotNil(t, st.FinishedAt(), "invalid final finished at")
+ require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
+ require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
+
+ sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[0].String()])
+ require.NoError(t, err, "list source trees failed")
+ require.Len(t, sourceTrees, 3)
+
+ for _, tr := range sourceTrees {
+ exists, err := e.shards[ids[1].String()].TreeExists(context.Background(), tr.CID, tr.TreeID)
+ require.NoError(t, err, "failed to check tree existance")
+ require.True(t, exists, "tree doesn't exists on target shard")
+
+ var height uint64
+ var sourceOps []pilorama.Move
+ for {
+ op, err := e.shards[ids[0].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
+ require.NoError(t, err)
+ if op.Time == 0 {
+ break
+ }
+ sourceOps = append(sourceOps, op)
+ height = op.Time + 1
+ }
+
+ height = 0
+ var targetOps []pilorama.Move
+ for {
+ op, err := e.shards[ids[1].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
+ require.NoError(t, err)
+ if op.Time == 0 {
+ break
+ }
+ targetOps = append(targetOps, op)
+ height = op.Time + 1
+ }
+
+ require.Equal(t, sourceOps, targetOps)
+ }
+}
+
+func TestEvacuateTreesRemote(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 3)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ mutex := sync.Mutex{}
+ evacuatedTreeOps := make(map[string][]*pilorama.Move)
+ var prm EvacuateShardPrm
+ prm.ShardID = ids
+ prm.Scope = EvacuateScopeTrees
+ prm.TreeHandler = func(ctx context.Context, contID cid.ID, treeID string, f pilorama.Forest) (bool, string, error) {
+ key := contID.String() + treeID
+ var height uint64
+ for {
+ op, err := f.TreeGetOpLog(ctx, contID, treeID, height)
+ require.NoError(t, err)
+
+ if op.Time == 0 {
+ return true, "", nil
+ }
+ mutex.Lock()
+ evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
+ mutex.Unlock()
+ height = op.Time + 1
+ }
+ }
+
+ expectedShardIDs := make([]string, 0, len(ids))
+ for _, id := range ids {
+ expectedShardIDs = append(expectedShardIDs, id.String())
+ }
+
+ st, err := e.GetEvacuationState(context.Background())
+ require.NoError(t, err, "get init state failed")
+ require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
+ require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
+ require.Nil(t, st.StartedAt(), "invalid init started at")
+ require.Nil(t, st.FinishedAt(), "invalid init finished at")
+ require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
+ require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
+
+ require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
+ st = testWaitForEvacuationCompleted(t, e)
+
+ require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count")
+ require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count")
+ require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
+ require.NotNil(t, st.StartedAt(), "invalid final started at")
+ require.NotNil(t, st.FinishedAt(), "invalid final finished at")
+ require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
+ require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
+
+ expectedTreeOps := make(map[string][]*pilorama.Move)
+ for i := range len(e.shards) {
+ sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
+ require.NoError(t, err, "list source trees failed")
+ require.Len(t, sourceTrees, 3)
+
+ for _, tr := range sourceTrees {
+ key := tr.CID.String() + tr.TreeID
+ var height uint64
+ for {
+ op, err := e.shards[ids[i].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
+ require.NoError(t, err)
+
+ if op.Time == 0 {
+ break
+ }
+ expectedTreeOps[key] = append(expectedTreeOps[key], &op)
+ height = op.Time + 1
+ }
+ }
+ }
+
+ require.Equal(t, expectedTreeOps, evacuatedTreeOps)
+}
+
+func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ // Create container with policy REP 2 REP 1 (skipped by RepOneOnly evacuation)
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ x1 = netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(1)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("cnr", "cnr1")
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+
+ // Create container with policy REP 1 REP 1 (evacuated by RepOneOnly)
+ cnr2 := container.Container{}
+ p2 := netmap.PlacementPolicy{}
+ p2.SetContainerBackupFactor(1)
+ x2 := netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ x2 = netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ cnr2.SetPlacementPolicy(p2)
+ cnr2.SetAttribute("cnr", "cnr2")
+
+ var idCnr2 cid.ID
+ container.CalculateID(&idCnr2, cnr2)
+ cnrmap[idCnr2] = &cnr2
+ cids = append(cids, idCnr2)
+
+ // Create container absent from the container source to simulate its removal
+ cnr3 := container.Container{}
+ p3 := netmap.PlacementPolicy{}
+ p3.SetContainerBackupFactor(1)
+ x3 := netmap.ReplicaDescriptor{}
+ x3.SetNumberOfObjects(1)
+ p3.AddReplicas(x3)
+ cnr3.SetPlacementPolicy(p3)
+ cnr3.SetAttribute("cnr", "cnr3")
+
+ var idCnr3 cid.ID
+ container.CalculateID(&idCnr3, cnr3)
+ cids = append(cids, idCnr3)
+
+ e.SetContainerSource(&containerStorage{cntmap: cnrmap})
+
+ for _, sh := range ids {
+ for j := range 3 {
+ for range 4 {
+ obj := testutil.GenerateObjectWithCID(cids[j])
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+ }
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ require.NoError(t, e.Evacuate(context.Background(), prm))
+ st := testWaitForEvacuationCompleted(t, e)
+ require.Equal(t, "", st.ErrorMessage())
+ require.Equal(t, uint64(4), st.ObjectsEvacuated())
+ require.Equal(t, uint64(8), st.ObjectsSkipped())
+ require.Equal(t, uint64(0), st.ObjectsFailed())
+}
+
+func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
+ t.Skip()
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ // Create containers with policy REP 2 (skipped by RepOneOnly evacuation)
+ for i := range 10_000 {
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("i", strconv.Itoa(i))
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+ }
+
+ e.SetContainerSource(&containerStorage{
+ cntmap: cnrmap,
+ latency: time.Millisecond * 100,
+ })
+
+ for _, cnt := range cids {
+ obj := testutil.GenerateObjectWithCID(cnt)
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+ prm.ContainerWorkerCount = 10
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
+
+ start := time.Now()
+ err := e.Evacuate(context.Background(), prm)
+ testWaitForEvacuationCompleted(t, e)
+ t.Logf("evacuate took %v\n", time.Since(start))
+ require.NoError(t, err)
+}
+
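+// waitForCompleted blocks until the current evacuation reaches the completed
+// state; it relies on statusCond and is intended for tests only.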
+func (l *evacuationLimiter) waitForCompleted() {
+ l.guard.Lock()
+ defer l.guard.Unlock()
+
+ for l.state.processState != EvacuateProcessStateCompleted {
+ l.statusCond.Wait()
+ }
+}
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
index a43c7f23f8..7dac9eb97f 100644
--- a/pkg/local_object_storage/engine/exists.go
+++ b/pkg/local_object_storage/engine/exists.go
@@ -1,24 +1,27 @@
package engine
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
)
-func (e *StorageEngine) exists(addr oid.Address) (bool, error) {
- var shPrm shard.ExistsPrm
- shPrm.SetAddress(addr)
+// exists returns true as the first value if the object exists.
+// The second return value reports whether the parent object is locked.
+func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool, bool, error) {
alreadyRemoved := false
exists := false
+ locked := false
- e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
- res, err := sh.Exists(shPrm)
+ if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) {
+ res, err := sh.Exists(ctx, shPrm)
if err != nil {
- if shard.IsErrRemoved(err) {
+ if client.IsErrObjectAlreadyRemoved(err) {
alreadyRemoved = true
return true
@@ -33,8 +36,8 @@ func (e *StorageEngine) exists(addr oid.Address) (bool, error) {
return true
}
- if !shard.IsErrNotFound(err) {
- e.reportShardError(sh, "could not check existence of object in shard", err)
+ if !client.IsErrObjectNotFound(err) {
+ e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
}
return false
}
@@ -42,15 +45,18 @@ func (e *StorageEngine) exists(addr oid.Address) (bool, error) {
if !exists {
exists = res.Exists()
}
+ if !locked {
+ locked = res.Locked()
+ }
return false
- })
-
- if alreadyRemoved {
- var errRemoved apistatus.ObjectAlreadyRemoved
-
- return false, errRemoved
+ }); err != nil {
+ return false, false, err
}
- return exists, nil
+ if alreadyRemoved {
+ return false, false, new(apistatus.ObjectAlreadyRemoved)
+ }
+
+ return exists, locked, nil
}
diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go
new file mode 100644
index 0000000000..9b3c0833f9
--- /dev/null
+++ b/pkg/local_object_storage/engine/exists_test.go
@@ -0,0 +1,51 @@
+package engine
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkExists(b *testing.B) {
+ b.Run("2 shards", func(b *testing.B) {
+ benchmarkExists(b, 2)
+ })
+ b.Run("4 shards", func(b *testing.B) {
+ benchmarkExists(b, 4)
+ })
+ b.Run("8 shards", func(b *testing.B) {
+ benchmarkExists(b, 8)
+ })
+}
+
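+// benchmarkExists populates the engine with objects at random addresses and
+// then probes an address that was never put, so every call has to miss on
+// every shard.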
+func benchmarkExists(b *testing.B, shardNum int) {
+ e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine
+ defer func() { require.NoError(b, e.Close(context.Background())) }()
+
+ addr := oidtest.Address()
+ for range 100 {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ err := Put(context.Background(), e, obj, false)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ var shPrm shard.ExistsPrm
+ shPrm.Address = addr
+ shPrm.ECParentAddress = oid.Address{}
+ ok, _, err := e.exists(context.Background(), shPrm)
+ if err != nil || ok {
+ b.Fatalf("%t %v", ok, err)
+ }
+ }
+}
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 264ace58e5..0694c53f35 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -1,14 +1,20 @@
package engine
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -43,138 +49,172 @@ func (r GetRes) Object() *objectSDK.Object {
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Get(prm GetPrm) (res GetRes, err error) {
+func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+ defer elapsed("Get", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.get(prm)
+ res, err = e.get(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) get(prm GetPrm) (GetRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddGetDuration)()
- }
-
- var (
- obj *objectSDK.Object
- siErr *objectSDK.SplitInfoError
-
- errNotFound apistatus.ObjectNotFound
-
- outSI *objectSDK.SplitInfo
- outError error = errNotFound
-
- shardWithMeta hashedShard
- metaError error
- )
+func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ errNotFound := new(apistatus.ObjectNotFound)
var shPrm shard.GetPrm
shPrm.SetAddress(prm.addr)
- var hasDegraded bool
- var objectExpired bool
-
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- noMeta := sh.GetMode().NoMetabase()
- shPrm.SetIgnoreMeta(noMeta)
-
- hasDegraded = hasDegraded || noMeta
-
- res, err := sh.Get(shPrm)
- if err != nil {
- if res.HasMeta() {
- shardWithMeta = sh
- metaError = err
- }
- switch {
- case shard.IsErrNotFound(err):
- return false // ignore, go to next shard
- case errors.As(err, &siErr):
- if outSI == nil {
- outSI = objectSDK.NewSplitInfo()
- }
-
- util.MergeSplitInfo(siErr.SplitInfo(), outSI)
-
- _, withLink := outSI.Link()
- _, withLast := outSI.LastPart()
-
- // stop iterating over shards if SplitInfo structure is complete
- if withLink && withLast {
- return true
- }
-
- return false
- case shard.IsErrRemoved(err):
- outError = err
-
- return true // stop, return it back
- case shard.IsErrObjectExpired(err):
- // object is found but should not
- // be returned
- objectExpired = true
- return true
- default:
- e.reportShardError(sh, "could not get object from shard", err)
- return false
- }
- }
-
- obj = res.Object()
-
- return true
- })
-
- if outSI != nil {
- return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI))
+ it := &getShardIterator{
+ OutError: errNotFound,
+ ShardPrm: shPrm,
+ Address: prm.addr,
+ Engine: e,
}
- if objectExpired {
+ if err := it.tryGetWithMeta(ctx); err != nil {
+ return GetRes{}, err
+ }
+
+ if it.SplitInfo != nil {
+ return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
+ }
+
+ if it.ECInfo != nil {
+ return GetRes{}, logicerr.Wrap(objectSDK.NewECInfoError(it.ECInfo))
+ }
+
+ if it.ObjectExpired {
return GetRes{}, errNotFound
}
- if obj == nil {
- if !hasDegraded && shardWithMeta.Shard == nil || !shard.IsErrNotFound(outError) {
- return GetRes{}, outError
+ if it.Object == nil {
+ if !it.HasDegraded && it.ShardWithMeta.Shard == nil || !client.IsErrObjectNotFound(it.OutError) {
+ return GetRes{}, it.OutError
}
- // If the object is not found but is present in metabase,
- // try to fetch it from blobstor directly. If it is found in any
- // blobstor, increase the error counter for the shard which contains the meta.
- shPrm.SetIgnoreMeta(true)
-
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- if sh.GetMode().NoMetabase() {
- // Already visited.
- return false
- }
-
- res, err := sh.Get(shPrm)
- obj = res.Object()
- return err == nil
- })
- if obj == nil {
- return GetRes{}, outError
+ if err := it.tryGetFromBlobstore(ctx); err != nil {
+ return GetRes{}, err
}
- if shardWithMeta.Shard != nil {
- e.reportShardError(shardWithMeta, "meta info was present, but object is missing",
- metaError, zap.Stringer("address", prm.addr))
+
+ if it.Object == nil {
+ return GetRes{}, it.OutError
+ }
+ if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
+ e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
+ zap.Stringer("shard_id", it.ShardWithMeta.ID()),
+ zap.Error(it.MetaError),
+ zap.Stringer("address", prm.addr))
}
}
return GetRes{
- obj: obj,
+ obj: it.Object,
}, nil
}
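+
+// getShardIterator accumulates the state of a single engine get while it
+// walks the sorted shard list: the object once found, split/EC info merged
+// from partial replies, the shard that held meta for a missing object, and
+// the error to return if nothing turns up.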
+type getShardIterator struct {
+ Object *objectSDK.Object
+ SplitInfo *objectSDK.SplitInfo
+ ECInfo *objectSDK.ECInfo
+ OutError error
+ ShardWithMeta hashedShard
+ MetaError error
+ HasDegraded bool
+ ObjectExpired bool
+
+ ShardPrm shard.GetPrm
+ Address oid.Address
+ Engine *StorageEngine
+
+ splitInfoErr *objectSDK.SplitInfoError
+ ecInfoErr *objectSDK.ECInfoError
+}
+
+func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error {
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ noMeta := sh.GetMode().NoMetabase()
+ i.ShardPrm.SetIgnoreMeta(noMeta)
+
+ i.HasDegraded = i.HasDegraded || noMeta
+
+ res, err := sh.Get(ctx, i.ShardPrm)
+ if err == nil {
+ i.Object = res.Object()
+ return true
+ }
+
+ if res.HasMeta() {
+ i.ShardWithMeta = sh
+ i.MetaError = err
+ }
+ switch {
+ case client.IsErrObjectNotFound(err):
+ return false // ignore, go to next shard
+ case errors.As(err, &i.splitInfoErr):
+ if i.SplitInfo == nil {
+ i.SplitInfo = objectSDK.NewSplitInfo()
+ }
+
+ util.MergeSplitInfo(i.splitInfoErr.SplitInfo(), i.SplitInfo)
+
+ _, withLink := i.SplitInfo.Link()
+ _, withLast := i.SplitInfo.LastPart()
+
+ // stop iterating over shards if SplitInfo structure is complete
+ return withLink && withLast
+ case errors.As(err, &i.ecInfoErr):
+ if i.ECInfo == nil {
+ i.ECInfo = objectSDK.NewECInfo()
+ }
+
+ util.MergeECInfo(i.ecInfoErr.ECInfo(), i.ECInfo)
+ // stop iterating over shards if ECInfo structure is complete
+ return len(i.ECInfo.Chunks) == int(i.ECInfo.Chunks[0].Total)
+ case client.IsErrObjectAlreadyRemoved(err):
+ i.OutError = err
+ return true // stop, return it back
+ case shard.IsErrObjectExpired(err):
+ // object is found but should not be returned
+ i.ObjectExpired = true
+ return true
+ default:
+ i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ return false
+ }
+ })
+}
+
+func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error {
+ // If the object is not found but is present in metabase,
+ // try to fetch it from blobstor directly. If it is found in any
+ // blobstor, increase the error counter for the shard which contains the meta.
+ i.ShardPrm.SetIgnoreMeta(true)
+
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ if sh.GetMode().NoMetabase() {
+ // Already visited.
+ return false
+ }
+
+ res, err := sh.Get(ctx, i.ShardPrm)
+ i.Object = res.Object()
+ return err == nil
+ })
+}
+
// Get reads object from local storage by provided address.
-func Get(storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
+func Get(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
var getPrm GetPrm
getPrm.WithAddress(addr)
- res, err := storage.Get(getPrm)
+ res, err := storage.Get(ctx, getPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index 689b46de82..d436dd4113 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -1,14 +1,18 @@
package engine
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
)
// HeadPrm groups the parameters of Head operation.
@@ -52,113 +56,95 @@ func (r HeadRes) Header() *objectSDK.Object {
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object was inhumed.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Head(prm HeadPrm) (res HeadRes, err error) {
+func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err error) {
err = e.execIfNotBlocked(func() error {
- res, err = e.head(prm)
+ res, err = e.head(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) head(prm HeadPrm) (HeadRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddHeadDuration)()
- }
+func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head")
+ defer span.End()
+ defer elapsed("Head", e.metrics.AddMethodDuration)()
var (
- head *objectSDK.Object
- siErr *objectSDK.SplitInfoError
-
- errNotFound apistatus.ObjectNotFound
-
+ head *objectSDK.Object
+ siErr *objectSDK.SplitInfoError
outSI *objectSDK.SplitInfo
- outError error = errNotFound
+ eiErr *objectSDK.ECInfoError
+ outEI *objectSDK.ECInfo
+ outError error = new(apistatus.ObjectNotFound)
+ shPrm shard.HeadPrm
)
-
- var shPrm shard.HeadPrm
shPrm.SetAddress(prm.addr)
shPrm.SetRaw(prm.raw)
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- res, err := sh.Head(shPrm)
+ if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
+ shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold
+ res, err := sh.Head(ctx, shPrm)
if err != nil {
switch {
- case shard.IsErrNotFound(err):
+ case client.IsErrObjectNotFound(err):
return false // ignore, go to next shard
case errors.As(err, &siErr):
if outSI == nil {
outSI = objectSDK.NewSplitInfo()
}
-
util.MergeSplitInfo(siErr.SplitInfo(), outSI)
-
_, withLink := outSI.Link()
_, withLast := outSI.LastPart()
-
// stop iterating over shards if SplitInfo structure is complete
if withLink && withLast {
return true
}
-
return false
- case shard.IsErrRemoved(err):
+ case errors.As(err, &eiErr):
+ if outEI == nil {
+ outEI = objectSDK.NewECInfo()
+ }
+ util.MergeECInfo(eiErr.ECInfo(), outEI)
+ // stop iterating over shards if ECInfo structure is complete
+ return len(outEI.Chunks) == int(outEI.Chunks[0].Total)
+ case client.IsErrObjectAlreadyRemoved(err):
outError = err
-
return true // stop, return it back
case shard.IsErrObjectExpired(err):
- var notFoundErr apistatus.ObjectNotFound
-
// object is found but should not
// be returned
- outError = notFoundErr
-
+ outError = new(apistatus.ObjectNotFound)
return true
default:
- e.reportShardError(sh, "could not head object from shard", err)
+ e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr))
return false
}
}
-
head = res.Object()
-
return true
- })
+ }); err != nil {
+ return HeadRes{}, err
+ }
+ if head != nil {
+ return HeadRes{head: head}, nil
+ }
if outSI != nil {
return HeadRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI))
}
-
- if head == nil {
- return HeadRes{}, outError
+ if outEI != nil {
+ return HeadRes{}, logicerr.Wrap(objectSDK.NewECInfoError(outEI))
}
-
- return HeadRes{
- head: head,
- }, nil
+ return HeadRes{}, outError
}
// Head reads object header from local storage by provided address.
-func Head(storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
+func Head(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
var headPrm HeadPrm
headPrm.WithAddress(addr)
- res, err := storage.Head(headPrm)
- if err != nil {
- return nil, err
- }
-
- return res.Header(), nil
-}
-
-// HeadRaw reads object header from local storage by provided address and raw
-// flag.
-func HeadRaw(storage *StorageEngine, addr oid.Address, raw bool) (*objectSDK.Object, error) {
- var headPrm HeadPrm
- headPrm.WithAddress(addr)
- headPrm.WithRaw(raw)
-
- res, err := storage.Head(headPrm)
+ res, err := storage.Head(ctx, headPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index 47f09f7a29..f9db81f169 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -1,24 +1,23 @@
package engine
import (
- "os"
+ "context"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
)
func TestHeadRaw(t *testing.T) {
- defer os.RemoveAll(t.Name())
-
cnr := cidtest.ID()
- splitID := object.NewSplitID()
+ splitID := objectSDK.NewSplitID()
- parent := generateObjectWithCID(t, cnr)
- addAttribute(parent, "foo", "bar")
+ parent := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(parent, "foo", "bar")
var parentAddr oid.Address
parentAddr.SetContainer(cnr)
@@ -26,12 +25,12 @@ func TestHeadRaw(t *testing.T) {
idParent, _ := parent.ID()
parentAddr.SetObject(idParent)
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
child.SetParentID(idParent)
child.SetSplitID(splitID)
- link := generateObjectWithCID(t, cnr)
+ link := testutil.GenerateObjectWithCID(cnr)
link.SetParent(parent)
link.SetParentID(idParent)
@@ -40,11 +39,11 @@ func TestHeadRaw(t *testing.T) {
link.SetSplitID(splitID)
t.Run("virtual object split in different shards", func(t *testing.T) {
- s1 := testNewShard(t, 1)
- s2 := testNewShard(t, 2)
+ te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+ e := te.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
- e := testNewEngineWithShards(s1, s2)
- defer e.Close()
+ s1, s2 := te.shards[0], te.shards[1]
var putPrmLeft shard.PutPrm
putPrmLeft.SetObject(child)
@@ -53,11 +52,11 @@ func TestHeadRaw(t *testing.T) {
putPrmLink.SetObject(link)
// put most left object in one shard
- _, err := s1.Put(putPrmLeft)
+ _, err := s1.Put(context.Background(), putPrmLeft)
require.NoError(t, err)
// put link object in another shard
- _, err = s2.Put(putPrmLink)
+ _, err = s2.Put(context.Background(), putPrmLink)
require.NoError(t, err)
// head with raw flag should return SplitInfoError
@@ -65,10 +64,10 @@ func TestHeadRaw(t *testing.T) {
headPrm.WithAddress(parentAddr)
headPrm.WithRaw(true)
- _, err = e.Head(headPrm)
+ _, err = e.Head(context.Background(), headPrm)
require.Error(t, err)
- var si *object.SplitInfoError
+ var si *objectSDK.SplitInfoError
require.ErrorAs(t, err, &si)
// SplitInfoError should contain info from both shards
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 151820ab2b..e5f7072e28 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -4,11 +4,17 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -20,9 +26,6 @@ type InhumePrm struct {
forceRemoval bool
}
-// InhumeRes encapsulates results of inhume operation.
-type InhumeRes struct{}
-
// WithTarget sets a list of objects that should be inhumed and tombstone address
// as the reason for inhume operation.
//
@@ -60,18 +63,20 @@ var errInhumeFailure = errors.New("inhume operation failed")
// with that object) if WithForceRemoval option has been provided.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Inhume(prm InhumePrm) (res InhumeRes, err error) {
- err = e.execIfNotBlocked(func() error {
- res, err = e.inhume(prm)
- return err
- })
+func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
+ defer span.End()
+ defer elapsed("Inhume", e.metrics.AddMethodDuration)()
- return
+ return e.execIfNotBlocked(func() error {
+ return e.inhume(ctx, prm)
+ })
}
-func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddInhumeDuration)()
+func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
+ addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
+ if err != nil {
+ return err
}
var shPrm shard.InhumePrm
@@ -79,122 +84,231 @@ func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
shPrm.ForceRemoval()
}
- for i := range prm.addrs {
- if !prm.forceRemoval {
- locked, err := e.IsLocked(prm.addrs[i])
- if err != nil {
- e.log.Warn("removing an object without full locking check",
- zap.Error(err),
- zap.Stringer("addr", prm.addrs[i]))
- } else if locked {
- var lockedErr apistatus.ObjectLocked
- return InhumeRes{}, lockedErr
- }
- }
-
+ for shardID, addrs := range addrsPerShard {
if prm.tombstone != nil {
- shPrm.SetTarget(*prm.tombstone, prm.addrs[i])
+ shPrm.SetTarget(*prm.tombstone, addrs...)
} else {
- shPrm.MarkAsGarbage(prm.addrs[i])
+ shPrm.MarkAsGarbage(addrs...)
}
- ok, err := e.inhumeAddr(prm.addrs[i], shPrm, true)
- if err != nil {
- return InhumeRes{}, err
+ sh, exists := e.shards[shardID]
+ if !exists {
+ e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard,
+ zap.Error(errors.New("this shard was expected to exist")),
+ zap.String("shard_id", shardID),
+ )
+ return errInhumeFailure
}
- if !ok {
- ok, err := e.inhumeAddr(prm.addrs[i], shPrm, false)
- if err != nil {
- return InhumeRes{}, err
- } else if !ok {
- return InhumeRes{}, errInhumeFailure
- }
+
+ if _, err := sh.Inhume(ctx, shPrm); err != nil {
+ e.reportInhumeError(ctx, err, sh)
+ return err
}
}
- return InhumeRes{}, nil
+ return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm)
}
-// Returns ok if object was inhumed during this invocation or before.
-func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
- root := false
- var errLocked apistatus.ObjectLocked
- var existPrm shard.ExistsPrm
- var retErr error
- var ok bool
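+// reportInhumeError reports err as a shard error unless it is one of the
+// expected inhume outcomes: a locked object, a lock removal attempt, or a
+// read-only/degraded shard.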
+func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) {
+ if err == nil {
+ return
+ }
- e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
- defer func() {
- // if object is root we continue since information about it
- // can be presented in other shards
- if checkExists && root {
- stop = false
- }
- }()
+ var errLocked *apistatus.ObjectLocked
+ switch {
+ case errors.As(err, &errLocked):
+ case errors.Is(err, shard.ErrLockObjectRemoval):
+ case errors.Is(err, shard.ErrReadOnlyMode):
+ case errors.Is(err, shard.ErrDegradedMode):
+ default:
+ e.reportShardError(ctx, hs, "couldn't inhume object in shard", err)
+ }
+}
- if checkExists {
- existPrm.SetAddress(addr)
- exRes, err := sh.Exists(existPrm)
- if err != nil {
- if shard.IsErrRemoved(err) || shard.IsErrObjectExpired(err) {
- // inhumed once - no need to be inhumed again
- ok = true
- return true
- }
+// inhumeNotFoundObjects removes objects that are not found on any shard.
+//
+// Even when an object is not found on any shard, it is important to remove it
+// anyway in order to populate the metabase indexes, because they are
+// responsible for the correct object status: without the indexes the status
+// would be `object not found`, with them it is `object is already removed`.
+//
+// Such objects are removed evenly across the shards, with the batch size
+// equal to 1 + floor(number of objects / number of shards).
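+//
+// For example (illustrative numbers): inhuming 10 not-found objects on an
+// engine with 4 shards gives a batch size of 1 + 10/4 = 3, so the shard
+// iteration below issues batches of 3, 3, 3 and 1.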
+func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error {
+ if len(addrs) == 0 {
+ return nil
+ }
- var siErr *objectSDK.SplitInfoError
- if !errors.As(err, &siErr) {
- e.reportShardError(sh, "could not check for presents in shard", err)
- return
- }
+ var shPrm shard.InhumePrm
+ if prm.forceRemoval {
+ shPrm.ForceRemoval()
+ }
- root = true
- } else if !exRes.Exists() {
- return
- }
+ numObjectsPerShard := 1 + len(addrs)/len(e.shards)
+
+ var inhumeErr error
+ itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
+ numObjects := min(numObjectsPerShard, len(addrs))
+
+ if numObjects == 0 {
+ return true
}
- _, err := sh.Inhume(prm)
- if err != nil {
- switch {
- case errors.As(err, &errLocked):
- retErr = apistatus.ObjectLocked{}
- return true
- case errors.Is(err, shard.ErrLockObjectRemoval):
- retErr = meta.ErrLockObjectRemoval
- return true
- case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode):
- retErr = err
- return true
- }
-
- e.reportShardError(sh, "could not inhume object in shard", err)
- return false
+ if prm.tombstone != nil {
+ shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...)
+ } else {
+ shPrm.MarkAsGarbage(addrs[:numObjects]...)
}
+ addrs = addrs[numObjects:]
- ok = true
- return true
+ _, inhumeErr = hs.Inhume(ctx, shPrm)
+ e.reportInhumeError(ctx, inhumeErr, hs)
+ return inhumeErr != nil
})
+ if inhumeErr != nil {
+ return inhumeErr
+ }
+ return itErr
+}
- return ok, retErr
+// groupObjectsByShard groups objects based on the shard(s) they are stored on.
+//
+// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
+// the objects are locked.
+//
+// Returns two sets of objects: found objects grouped per shard, and not-found
+// objects, i.e. objects missing from every shard. The latter can happen if a
+// node is a container node but doesn't participate in a replica group of the
+// object.
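+//
+// For illustration (assumed values): given a1 and a2 stored on shard "s1" and
+// a3 stored nowhere, the result is groups = {"s1": [a1, a2]} and
+// notFoundObjects = [a3].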
+func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) {
+ groups = make(map[string][]oid.Address)
+
+ var ids []string
+ for _, addr := range addrs {
+ ids, err = e.findShards(ctx, addr, checkLocked)
+ if err != nil {
+ return
+ }
+
+ if len(ids) == 0 {
+ notFoundObjects = append(notFoundObjects, addr)
+ continue
+ }
+
+ for _, id := range ids {
+ groups[id] = append(groups[id], addr)
+ }
+ }
+
+ return
+}
+
+// findShards determines the shard(s) where the object is stored.
+//
+// If the object is a root object, multiple shards may be returned.
+//
+// If checkLocked is set, [apistatus.ObjectLocked] will be returned if the
+// object is locked.
+func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) {
+ var (
+ ids []string
+ retErr error
+
+ prm shard.ExistsPrm
+
+ siErr *objectSDK.SplitInfoError
+ ecErr *objectSDK.ECInfoError
+
+ isRootObject bool
+ objectExists bool
+ )
+
+ if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
+ objectExists = false
+
+ prm.Address = addr
+ switch res, err := sh.Exists(ctx, prm); {
+ case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err):
+ // NOTE(@a-savchuk): there were some considerations that we can stop
+ // immediately if the object is already removed or expired. However,
+ // the previous method behavior was:
+ // - keep iterating if it's a root object and already removed,
+ // - stop iterating if it's not a root object and removed.
+ //
+ // Since my task was only improving method speed, let's keep the
+ // previous method behavior. Continue if it's a root object.
+ return !isRootObject
+ case errors.As(err, &siErr) || errors.As(err, &ecErr):
+ isRootObject = true
+ objectExists = true
+ case err != nil:
+ e.reportShardError(
+ ctx, sh, "couldn't check for presence in shard",
+ err, zap.Stringer("address", addr),
+ )
+ case res.Exists():
+ objectExists = true
+ default:
+ }
+
+ if checkLocked {
+ if isLocked, err := sh.IsLocked(ctx, addr); err != nil {
+ e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
+ zap.Error(err),
+ zap.Stringer("address", addr),
+ )
+ } else if isLocked {
+ retErr = new(apistatus.ObjectLocked)
+ return true
+ }
+ }
+
+ // This exit point must come after checking if the object is locked,
+ // since the locked index may be populated even if the object doesn't
+ // exist.
+ if !objectExists {
+ return
+ }
+
+ ids = append(ids, sh.ID().String())
+
+ // Continue if it's a root object.
+ return !isRootObject
+ }); err != nil {
+ return nil, err
+ }
+
+ if retErr != nil {
+ return nil, retErr
+ }
+ return ids, nil
}
// IsLocked checks whether an object is locked according to StorageEngine's state.
-func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
+func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.IsLocked",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
var locked bool
var err error
var outErr error
- e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
- locked, err = h.Shard.IsLocked(addr)
+ if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
+ locked, err = h.IsLocked(ctx, addr)
if err != nil {
- e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr))
+ e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
outErr = err
return false
}
return locked
- })
+ }); err != nil {
+ return false, err
+ }
if locked {
return locked, nil
@@ -203,43 +317,252 @@ func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
return locked, outErr
}
-func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- sh.HandleExpiredTombstones(addrs)
+// GetLocks returns the lock IDs if the object is locked according to StorageEngine's state.
+func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
- select {
- case <-ctx.Done():
- return true
- default:
- return false
+ var allLocks []oid.ID
+ var outErr error
+
+ if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
+ locks, err := h.GetLocks(ctx, addr)
+ if err != nil {
+ e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr))
+ outErr = err
}
- })
+ allLocks = append(allLocks, locks...)
+ return false
+ }); err != nil {
+ return nil, err
+ }
+ if len(allLocks) > 0 {
+ return allLocks, nil
+ }
+ return allLocks, outErr
}
-func (e *StorageEngine) processExpiredLocks(ctx context.Context, lockers []oid.Address) {
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- sh.HandleExpiredLocks(lockers)
+func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ sh.HandleExpiredTombstones(ctx, addrs)
select {
case <-ctx.Done():
- e.log.Info("interrupt processing the expired locks by context")
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err()))
return true
default:
return false
}
- })
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err))
+ }
+}
+
+func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ sh.HandleExpiredLocks(ctx, epoch, lockers)
+
+ select {
+ case <-ctx.Done():
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
+ return true
+ default:
+ return false
+ }
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err))
+ }
}
func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) {
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- sh.HandleDeletedLocks(lockers)
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ sh.HandleDeletedLocks(ctx, lockers)
select {
case <-ctx.Done():
- e.log.Info("interrupt processing the deleted locks by context")
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
return true
default:
return false
}
- })
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err))
+ }
+}
+
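+// processZeroSizeContainers runs in two passes: first it checks on every shard
+// that each candidate container really has zero accounted size, then it
+// deletes the size records and the corresponding metric.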
+func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) {
+ if len(ids) == 0 {
+ return
+ }
+ idMap, err := e.selectNonExistentIDs(ctx, ids)
+ if err != nil {
+ return
+ }
+ if len(idMap) == 0 {
+ return
+ }
+ var failed bool
+ var prm shard.ContainerSizePrm
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ select {
+ case <-ctx.Done():
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+ failed = true
+ return true
+ default:
+ }
+
+ var drop []cid.ID
+ for id := range idMap {
+ prm.SetContainerID(id)
+ s, err := sh.ContainerSize(ctx, prm)
+ if err != nil {
+ e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ failed = true
+ return true
+ }
+ if s.Size() > 0 {
+ drop = append(drop, id)
+ }
+ }
+ for _, id := range drop {
+ delete(idMap, id)
+ }
+
+ return len(idMap) == 0
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
+ return
+ }
+ if failed || len(idMap) == 0 {
+ return
+ }
+
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ select {
+ case <-ctx.Done():
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+ failed = true
+ return true
+ default:
+ }
+
+ for id := range idMap {
+ if err := sh.DeleteContainerSize(ctx, id); err != nil {
+ e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ failed = true
+ return true
+ }
+ }
+
+ return false
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
+ return
+ }
+ if failed {
+ return
+ }
+ for id := range idMap {
+ e.metrics.DeleteContainerSize(id.EncodeToString())
+ }
+}
+
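+// processZeroCountContainers mirrors processZeroSizeContainers for object
+// counters: containers that are unavailable and have zero phy/logic/user
+// counts on every shard get their count records and metrics removed.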
+func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []cid.ID) {
+ if len(ids) == 0 {
+ return
+ }
+ idMap, err := e.selectNonExistentIDs(ctx, ids)
+ if err != nil {
+ return
+ }
+ if len(idMap) == 0 {
+ return
+ }
+ var failed bool
+ var prm shard.ContainerCountPrm
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ select {
+ case <-ctx.Done():
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+ failed = true
+ return true
+ default:
+ }
+
+ var drop []cid.ID
+ for id := range idMap {
+ prm.ContainerID = id
+ s, err := sh.ContainerCount(ctx, prm)
+ if err != nil {
+ e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
+ failed = true
+ return true
+ }
+ if s.User > 0 || s.Logic > 0 || s.Phy > 0 {
+ drop = append(drop, id)
+ }
+ }
+ for _, id := range drop {
+ delete(idMap, id)
+ }
+
+ return len(idMap) == 0
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
+ return
+ }
+ if failed || len(idMap) == 0 {
+ return
+ }
+
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
+ select {
+ case <-ctx.Done():
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+ failed = true
+ return true
+ default:
+ }
+
+ for id := range idMap {
+ if err := sh.DeleteContainerCount(ctx, id); err != nil {
+ e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+ failed = true
+ return true
+ }
+ }
+
+ return false
+ }); err != nil {
+ e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
+ return
+ }
+ if failed {
+ return
+ }
+ for id := range idMap {
+ e.metrics.DeleteContainerCount(id.EncodeToString())
+ }
+}
+
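+// selectNonExistentIDs filters the given container IDs, keeping only those
+// that are no longer available in the container source.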
+func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID) (map[cid.ID]struct{}, error) {
+ cs := e.containerSource.Load()
+
+ idMap := make(map[cid.ID]struct{})
+ for _, id := range ids {
+ isAvailable, err := cs.IsContainerAvailable(ctx, id)
+ if err != nil {
+ e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
+ return nil, err
+ }
+ if isAvailable {
+ continue
+ }
+ idMap[id] = struct{}{}
+ }
+ return idMap, nil
}
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 8cf0b16677..0e268cd231 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -1,35 +1,44 @@
package engine
import (
- "os"
+ "context"
+ "fmt"
+ "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
)
func TestStorageEngine_Inhume(t *testing.T) {
- defer os.RemoveAll(t.Name())
-
cnr := cidtest.ID()
splitID := objectSDK.NewSplitID()
fs := objectSDK.SearchFilters{}
fs.AddRootFilter()
- tombstoneID := object.AddressOf(generateObjectWithCID(t, cnr))
- parent := generateObjectWithCID(t, cnr)
+ tombstoneID := object.AddressOf(testutil.GenerateObjectWithCID(cnr))
+ parent := testutil.GenerateObjectWithCID(cnr)
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
child.SetSplitID(splitID)
- link := generateObjectWithCID(t, cnr)
+ link := testutil.GenerateObjectWithCID(cnr)
link.SetParent(parent)
link.SetParentID(idParent)
idChild, _ := child.ID()
@@ -37,48 +46,297 @@ func TestStorageEngine_Inhume(t *testing.T) {
link.SetSplitID(splitID)
t.Run("delete small object", func(t *testing.T) {
- e := testNewEngineWithShardNum(t, 1)
- defer e.Close()
+ t.Parallel()
+ e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
- err := Put(e, parent)
+ err := Put(context.Background(), e, parent, false)
require.NoError(t, err)
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- _, err = e.Inhume(inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
t.Run("delete big object", func(t *testing.T) {
- s1 := testNewShard(t, 1)
- s2 := testNewShard(t, 2)
+ t.Parallel()
- e := testNewEngineWithShards(s1, s2)
- defer e.Close()
+ te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+ e := te.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
+
+ s1, s2 := te.shards[0], te.shards[1]
var putChild shard.PutPrm
putChild.SetObject(child)
- _, err := s1.Put(putChild)
+ _, err := s1.Put(context.Background(), putChild)
require.NoError(t, err)
var putLink shard.PutPrm
putLink.SetObject(link)
- _, err = s2.Put(putLink)
+ _, err = s2.Put(context.Background(), putLink)
require.NoError(t, err)
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- _, err = e.Inhume(inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
}
+
+func TestStorageEngine_ECInhume(t *testing.T) {
+ parentObjectAddress := oidtest.Address()
+ containerID := parentObjectAddress.Container()
+
+ chunkObject0 := testutil.GenerateObjectWithCID(containerID)
+ chunkObject0.SetECHeader(objectSDK.NewECHeader(
+ objectSDK.ECParentInfo{
+ ID: parentObjectAddress.Object(),
+ }, 0, 4, []byte{}, 0))
+
+ chunkObject1 := testutil.GenerateObjectWithCID(containerID)
+ chunkObject1.SetECHeader(objectSDK.NewECHeader(
+ objectSDK.ECParentInfo{
+ ID: parentObjectAddress.Object(),
+ }, 1, 4, []byte{}, 0))
+
+ tombstone := objectSDK.NewTombstone()
+ tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()})
+ payload, err := tombstone.Marshal()
+ require.NoError(t, err)
+ tombstoneObject := testutil.GenerateObjectWithCID(containerID)
+ tombstoneObject.SetType(objectSDK.TypeTombstone)
+ tombstoneObject.SetPayload(payload)
+ tombstoneObjectAddress := object.AddressOf(tombstoneObject)
+
+ e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
+
+ require.NoError(t, Put(context.Background(), e, chunkObject0, false))
+
+ require.NoError(t, Put(context.Background(), e, tombstoneObject, false))
+
+ var inhumePrm InhumePrm
+ inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress)
+ err = e.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+
+ var alreadyRemoved *apistatus.ObjectAlreadyRemoved
+
+ require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved)
+
+ require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved)
+}
+
+func TestInhumeExpiredRegularObject(t *testing.T) {
+ t.Parallel()
+
+ const currEpoch = 42
+ const objectExpiresAfter = currEpoch - 1
+
+ engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+ return []shard.Option{
+ shard.WithDisabledGC(),
+ shard.WithMetaBaseOptions(append(
+ testGetDefaultMetabaseOptions(t),
+ meta.WithEpochState(epochState{currEpoch}),
+ )...),
+ }
+ }).prepare(t).engine
+
+ cnr := cidtest.ID()
+
+ generateAndPutObject := func() *objectSDK.Object {
+ obj := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
+
+ var putPrm PutPrm
+ putPrm.Object = obj
+ require.NoError(t, engine.Put(context.Background(), putPrm))
+ return obj
+ }
+
+ t.Run("inhume with tombstone", func(t *testing.T) {
+ obj := generateAndPutObject()
+ ts := oidtest.Address()
+ ts.SetContainer(cnr)
+
+ var prm InhumePrm
+ prm.WithTarget(ts, object.AddressOf(obj))
+ err := engine.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ })
+
+ t.Run("inhume without tombstone", func(t *testing.T) {
+ obj := generateAndPutObject()
+
+ var prm InhumePrm
+ prm.MarkAsGarbage(object.AddressOf(obj))
+ err := engine.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ })
+}
+
+func BenchmarkInhumeMultipart(b *testing.B) {
+	// The benchmark result depends only insignificantly on the number of shards,
+	// so do not use it as a benchmark parameter; just set it big enough.
+ numShards := 100
+
+ for numObjects := 1; numObjects <= 10000; numObjects *= 10 {
+ b.Run(
+ fmt.Sprintf("objects=%d", numObjects),
+ func(b *testing.B) {
+ benchmarkInhumeMultipart(b, numShards, numObjects)
+ },
+ )
+ }
+}
+
+func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
+ b.StopTimer()
+
+ engine := testNewEngine(b).
+ setShardsNum(b, numShards).prepare(b).engine
+ defer func() { require.NoError(b, engine.Close(context.Background())) }()
+
+ cnt := cidtest.ID()
+ eg := errgroup.Group{}
+
+ for range b.N {
+ addrs := make([]oid.Address, numObjects)
+
+ for i := range numObjects {
+ prm := PutPrm{}
+
+ prm.Object = objecttest.Object().Parent()
+ prm.Object.SetContainerID(cnt)
+ prm.Object.SetType(objectSDK.TypeRegular)
+
+ addrs[i] = object.AddressOf(prm.Object)
+
+ eg.Go(func() error {
+ return engine.Put(context.Background(), prm)
+ })
+ }
+ require.NoError(b, eg.Wait())
+
+ ts := oidtest.Address()
+ ts.SetContainer(cnt)
+
+ prm := InhumePrm{}
+ prm.WithTarget(ts, addrs...)
+
+ b.StartTimer()
+ err := engine.Inhume(context.Background(), prm)
+ require.NoError(b, err)
+ b.StopTimer()
+ }
+}
+
+func TestInhumeIfObjectDoesntExist(t *testing.T) {
+ const numShards = 4
+
+ engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine
+ t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) })
+
+ t.Run("inhume without tombstone", func(t *testing.T) {
+ testInhumeIfObjectDoesntExist(t, engine, false, false)
+ })
+ t.Run("inhume with tombstone", func(t *testing.T) {
+ testInhumeIfObjectDoesntExist(t, engine, true, false)
+ })
+ t.Run("force inhume", func(t *testing.T) {
+ testInhumeIfObjectDoesntExist(t, engine, false, true)
+ })
+
+ t.Run("object is locked", func(t *testing.T) {
+ t.Run("inhume without tombstone", func(t *testing.T) {
+ testInhumeLockedIfObjectDoesntExist(t, engine, false, false)
+ })
+ t.Run("inhume with tombstone", func(t *testing.T) {
+ testInhumeLockedIfObjectDoesntExist(t, engine, true, false)
+ })
+ t.Run("force inhume", func(t *testing.T) {
+ testInhumeLockedIfObjectDoesntExist(t, engine, false, true)
+ })
+ })
+}
+
+func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
+ t.Parallel()
+
+ object := oidtest.Address()
+ require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce))
+
+ err := testHeadObject(e, object)
+ if withTombstone {
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
+ } else {
+ require.True(t, client.IsErrObjectNotFound(err))
+ }
+}
+
+func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
+ t.Parallel()
+
+ object := oidtest.Address()
+ require.NoError(t, testLockObject(e, object))
+
+ err := testInhumeObject(t, e, object, withTombstone, withForce)
+ if !withForce {
+ var errLocked *apistatus.ObjectLocked
+ require.ErrorAs(t, err, &errLocked)
+ return
+ }
+ require.NoError(t, err)
+
+ err = testHeadObject(e, object)
+ if withTombstone {
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
+ } else {
+ require.True(t, client.IsErrObjectNotFound(err))
+ }
+}
+
+func testLockObject(e *StorageEngine, obj oid.Address) error {
+ return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()})
+}
+
+func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error {
+ tombstone := oidtest.Address()
+ tombstone.SetContainer(obj.Container())
+
+	// Due to the test design it is possible to set both options; however,
+	// removal with tombstone and force removal are mutually exclusive.
+ require.False(t, withTombstone && withForce)
+
+ var inhumePrm InhumePrm
+ if withTombstone {
+ inhumePrm.WithTarget(tombstone, obj)
+ } else {
+ inhumePrm.MarkAsGarbage(obj)
+ }
+ if withForce {
+ inhumePrm.WithForceRemoval()
+ }
+ return e.Inhume(context.Background(), inhumePrm)
+}
+
+func testHeadObject(e *StorageEngine, obj oid.Address) error {
+ var headPrm HeadPrm
+ headPrm.WithAddress(obj)
+
+ _, err := e.Head(context.Background(), headPrm)
+ return err
+}
diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go
index 8644a7f7e4..073248862b 100644
--- a/pkg/local_object_storage/engine/list.go
+++ b/pkg/local_object_storage/engine/list.go
@@ -1,10 +1,13 @@
package engine
import (
+ "context"
+ "math/rand"
"sort"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
)
// ErrEndOfListing is returned from an object listing with cursor
@@ -12,10 +15,38 @@ import (
// cursor. Use nil cursor object to start listing again.
var ErrEndOfListing = shard.ErrEndOfListing
-// Cursor is a type for continuous object listing.
+// Cursor is a type for continuous object listing. It contains the IDs of the
+// shards left to read and per-shard cursors holding the state of the previous read.
type Cursor struct {
- shardID string
- shardCursor *shard.Cursor
+ current string
+ shardIDs map[string]bool
+ shardIDToCursor map[string]*shard.Cursor
+}
+
+func (c *Cursor) getCurrentShardCursor() *shard.Cursor {
+ return c.shardIDToCursor[c.current]
+}
+
+func (c *Cursor) setCurrentShardCursor(sc *shard.Cursor) {
+ c.shardIDToCursor[c.current] = sc
+}
+
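+// nextShard makes a random not-yet-read shard the current one and returns
+// false once every shard has been read out.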
+func (c *Cursor) nextShard() bool {
+ var shardsToRead []string
+ for shardID, read := range c.shardIDs {
+ if !read {
+ shardsToRead = append(shardsToRead, shardID)
+ }
+ }
+ if len(shardsToRead) == 0 {
+ return false
+ }
+ c.current = shardsToRead[rand.Intn(len(shardsToRead))]
+ return true
+}
+
+func (c *Cursor) setShardRead(shardID string) {
+ c.shardIDs[shardID] = true
}
// ListWithCursorPrm contains parameters for ListWithCursor operation.
@@ -38,12 +69,12 @@ func (p *ListWithCursorPrm) WithCursor(cursor *Cursor) {
// ListWithCursorRes contains values returned from ListWithCursor operation.
type ListWithCursorRes struct {
- addrList []objectcore.AddressWithType
+ addrList []objectcore.Info
cursor *Cursor
}
// AddressList returns addresses selected by ListWithCursor operation.
-func (l ListWithCursorRes) AddressList() []objectcore.AddressWithType {
+func (l ListWithCursorRes) AddressList() []objectcore.Info {
return l.addrList
}
@@ -57,65 +88,70 @@ func (l ListWithCursorRes) Cursor() *Cursor {
// Does not include inhumed objects. Use cursor value from the response
// for consecutive requests.
//
+// If the count parameter is big enough, the method reads objects from
+// different shards in portions; each portion comes from a randomly chosen
+// shard that has not been read out yet.
+//
+// Adding a shard between ListWithCursor calls does not invalidate the cursor,
+// but the new shard won't be listed.
+// Removing a shard between calls leads to undefined behavior
+// (e.g. objects from the removed shard may still be returned).
+//
// Returns ErrEndOfListing if there are no more objects to return or count
// parameter set to zero.
-func (e *StorageEngine) ListWithCursor(prm ListWithCursorPrm) (ListWithCursorRes, error) {
- result := make([]objectcore.AddressWithType, 0, prm.count)
+func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.ListWithCursor")
+ defer span.End()
+ defer elapsed("ListWithCursor", e.metrics.AddMethodDuration)()
- // 1. Get available shards and sort them.
- e.mtx.RLock()
- shardIDs := make([]string, 0, len(e.shards))
- for id := range e.shards {
- shardIDs = append(shardIDs, id)
- }
- e.mtx.RUnlock()
+ result := make([]objectcore.Info, 0, prm.count)
- if len(shardIDs) == 0 {
- return ListWithCursorRes{}, ErrEndOfListing
- }
-
- sort.Slice(shardIDs, func(i, j int) bool {
- return shardIDs[i] < shardIDs[j]
- })
-
- // 2. Prepare cursor object.
+ // Set initial cursors
cursor := prm.cursor
if cursor == nil {
- cursor = &Cursor{shardID: shardIDs[0]}
+ shardIDs := getSortedShardIDs(e)
+ if len(shardIDs) == 0 {
+ return ListWithCursorRes{}, ErrEndOfListing
+ }
+ cursor = newCursor(shardIDs)
}
- // 3. Iterate over available shards. Skip unavailable shards.
- for i := range shardIDs {
+ const (
+ splitShardCountLimit = 100
+ shardsNum = 4
+ )
+
+ batchSize := prm.count
+ if batchSize >= splitShardCountLimit {
+ batchSize /= shardsNum
+ }
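+	// Illustrative example (numbers assumed): with count = 1000 the batch size
+	// becomes 250, so a single call spreads the listing over up to four
+	// randomly chosen shards instead of draining one shard at a time.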
+
+ for cursor.nextShard() {
if len(result) >= int(prm.count) {
break
}
-
- if shardIDs[i] < cursor.shardID {
- continue
- }
+ curr := cursor.current
e.mtx.RLock()
- shardInstance, ok := e.shards[shardIDs[i]]
+ shardInstance, ok := e.shards[curr]
e.mtx.RUnlock()
if !ok {
+ cursor.setShardRead(curr)
continue
}
- count := uint32(int(prm.count) - len(result))
+ count := min(prm.count-uint32(len(result)), batchSize)
+
var shardPrm shard.ListWithCursorPrm
shardPrm.WithCount(count)
- if shardIDs[i] == cursor.shardID {
- shardPrm.WithCursor(cursor.shardCursor)
- }
+ shardPrm.WithCursor(cursor.getCurrentShardCursor())
- res, err := shardInstance.ListWithCursor(shardPrm)
+ res, err := shardInstance.ListWithCursor(ctx, shardPrm)
if err != nil {
+ cursor.setShardRead(curr)
continue
}
-
result = append(result, res.AddressList()...)
- cursor.shardCursor = res.Cursor()
- cursor.shardID = shardIDs[i]
+ cursor.setCurrentShardCursor(res.Cursor())
}
if len(result) == 0 {
@@ -127,3 +163,23 @@ func (e *StorageEngine) ListWithCursor(prm ListWithCursorPrm) (ListWithCursorRes
cursor: cursor,
}, nil
}
+
+func getSortedShardIDs(e *StorageEngine) []string {
+ e.mtx.RLock()
+ shardIDs := make([]string, 0, len(e.shards))
+ for id := range e.shards {
+ shardIDs = append(shardIDs, id)
+ }
+ e.mtx.RUnlock()
+ sort.Strings(shardIDs)
+ return shardIDs
+}
+
+func newCursor(shardIDs []string) *Cursor {
+ shardIDsMap := make(map[string]bool)
+ shardIDToCursor := make(map[string]*shard.Cursor)
+ for _, shardID := range shardIDs {
+ shardIDsMap[shardID] = false
+ }
+ return &Cursor{shardIDs: shardIDsMap, shardIDToCursor: shardIDToCursor}
+}
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index 1771cf0848..6cfa546f8e 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -1,76 +1,103 @@
package engine
import (
- "errors"
- "os"
- "sort"
+ "context"
+ "path/filepath"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
func TestListWithCursor(t *testing.T) {
- s1 := testNewShard(t, 1)
- s2 := testNewShard(t, 2)
- e := testNewEngineWithShards(s1, s2)
+ t.Parallel()
- t.Cleanup(func() {
- e.Close()
- os.RemoveAll(t.Name())
- })
-
- const total = 20
-
- expected := make([]object.AddressWithType, 0, total)
- got := make([]object.AddressWithType, 0, total)
-
- for i := 0; i < total; i++ {
- containerID := cidtest.ID()
- obj := generateObjectWithCID(t, containerID)
-
- var prm PutPrm
- prm.WithObject(obj)
-
- _, err := e.Put(prm)
- require.NoError(t, err)
- expected = append(expected, object.AddressWithType{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
+ tests := []struct {
+ name string
+ shardNum int
+ objectNum int
+ batchSize uint32
+ }{
+ {
+ name: "one shard, few objects, small batch size",
+ shardNum: 1,
+ objectNum: 2,
+ batchSize: 1,
+ },
+ {
+ name: "one shard, many objects, big batch size",
+ shardNum: 1,
+ objectNum: 53,
+ batchSize: 100,
+ },
+ {
+ name: "many shards, many objects, small batch size",
+ shardNum: 6,
+ objectNum: 66,
+ batchSize: 1,
+ },
+ {
+ name: "many shards, many objects, big batch size",
+ shardNum: 6,
+ objectNum: 99,
+ batchSize: 100,
+ },
}
+ for i := range tests {
+ tt := tests[i]
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t)),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages(
+ newStorages(t, t.TempDir(), 1<<20))),
+ shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+ meta.WithPermissions(0o700),
+ meta.WithEpochState(epochState{}),
+ ),
+ }
+ }).prepare(t).engine
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
- expected = sortAddresses(expected)
+ expected := make([]object.Info, 0, tt.objectNum)
+ got := make([]object.Info, 0, tt.objectNum)
- var prm ListWithCursorPrm
- prm.WithCount(1)
+ for range tt.objectNum {
+ containerID := cidtest.ID()
+ obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
+ err := e.Put(context.Background(), PutPrm{Object: obj})
+ require.NoError(t, err)
+ expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
+ }
- res, err := e.ListWithCursor(prm)
- require.NoError(t, err)
- require.NotEmpty(t, res.AddressList())
- got = append(got, res.AddressList()...)
+ var prm ListWithCursorPrm
+ prm.count = tt.batchSize
+ for {
+ res, err := e.ListWithCursor(context.Background(), prm)
+ if err == ErrEndOfListing {
+ require.Empty(t, res.AddressList())
+ break
+ }
+ require.NotEmpty(t, res.AddressList())
+ got = append(got, res.AddressList()...)
+ prm.cursor = res.Cursor()
+ }
- for i := 0; i < total-1; i++ {
- prm.WithCursor(res.Cursor())
-
- res, err = e.ListWithCursor(prm)
- if errors.Is(err, ErrEndOfListing) {
- break
- }
- got = append(got, res.AddressList()...)
+ require.ElementsMatch(t, expected, got)
+ })
}
-
- prm.WithCursor(res.Cursor())
-
- _, err = e.ListWithCursor(prm)
- require.ErrorIs(t, err, ErrEndOfListing)
-
- got = sortAddresses(got)
- require.Equal(t, expected, got)
-}
-
-func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithType {
- sort.Slice(addrWithType, func(i, j int) bool {
- return addrWithType[i].Address.EncodeToString() < addrWithType[j].Address.EncodeToString()
- })
- return addrWithType
}
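
The rewritten test drives ListWithCursor in a loop until ErrEndOfListing. A minimal package-internal sketch of that paging idiom, assuming an engine e (prm.count and prm.cursor are the unexported fields the test itself sets):

// drainAll pages through the engine until the cursor is exhausted.
// Sketch only: error handling simplified, field names follow the test above.
func drainAll(ctx context.Context, e *StorageEngine, batch uint32) ([]object.Info, error) {
	var prm ListWithCursorPrm
	prm.count = batch
	var out []object.Info
	for {
		res, err := e.ListWithCursor(ctx, prm)
		if err != nil {
			if errors.Is(err, ErrEndOfListing) {
				return out, nil // no more objects to list
			}
			return nil, err
		}
		out = append(out, res.AddressList()...)
		prm.cursor = res.Cursor() // resume after the last returned batch
	}
}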
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index 20a4d68e85..3b0cf74f99 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -1,14 +1,19 @@
package engine
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
var errLockFailed = errors.New("lock operation failed")
@@ -19,21 +24,38 @@ var errLockFailed = errors.New("lock operation failed")
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
-func (e *StorageEngine) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Lock",
+ trace.WithAttributes(
+ attribute.String("container_id", idCnr.EncodeToString()),
+ attribute.String("locker", locker.EncodeToString()),
+ attribute.Int("locked_count", len(locked)),
+ ))
+ defer span.End()
+ defer elapsed("Lock", e.metrics.AddMethodDuration)()
+
return e.execIfNotBlocked(func() error {
- return e.lock(idCnr, locker, locked)
+ return e.lock(ctx, idCnr, locker, locked)
})
}
-func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
for i := range locked {
- switch e.lockSingle(idCnr, locker, locked[i], true) {
+ st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true)
+ if err != nil {
+ return err
+ }
+ switch st {
case 1:
- return logicerr.Wrap(apistatus.LockNonRegularObject{})
+ return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
- switch e.lockSingle(idCnr, locker, locked[i], false) {
+ st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false)
+ if err != nil {
+ return err
+ }
+ switch st {
case 1:
- return logicerr.Wrap(apistatus.LockNonRegularObject{})
+ return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
return logicerr.Wrap(errLockFailed)
}
@@ -47,16 +69,13 @@ func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error
// - 0: fail
// - 1: locking irregular object
// - 2: ok
-func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
+func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
- var errIrregular apistatus.LockNonRegularObject
-
var addrLocked oid.Address
addrLocked.SetContainer(idCnr)
addrLocked.SetObject(locked)
-
- e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) {
+ retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) {
defer func() {
// if object is root we continue since information about it
// can be presented in other shards
@@ -67,19 +86,33 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
if checkExists {
var existsPrm shard.ExistsPrm
- existsPrm.SetAddress(addrLocked)
-
- exRes, err := sh.Exists(existsPrm)
+ existsPrm.Address = addrLocked
+ exRes, err := sh.Exists(ctx, existsPrm)
if err != nil {
var siErr *objectSDK.SplitInfoError
- if !errors.As(err, &siErr) {
+ var eiErr *objectSDK.ECInfoError
+ if errors.As(err, &eiErr) {
+ eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr)
+ if !ok {
+ return false
+ }
+
+ err = sh.Lock(ctx, idCnr, locker, eclocked)
+ if err != nil {
+ e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
+ return false
+ }
+ root = true
+ return false
+ } else if !errors.As(err, &siErr) {
if shard.IsErrObjectExpired(err) {
// object is already expired =>
// do not lock it
return true
}
-
- e.reportShardError(sh, "could not check locked object for presence in shard", err)
+ e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return
}
@@ -89,22 +122,35 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
}
}
- err := sh.Lock(idCnr, locker, []oid.ID{locked})
+ err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
- e.reportShardError(sh, "could not lock object in shard", err)
+ e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
+ var errIrregular *apistatus.LockNonRegularObject
if errors.As(err, &errIrregular) {
status = 1
return true
}
-
return false
}
-
status = 2
-
return true
})
-
return
}
+
+func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) {
+ eclocked := []oid.ID{locked}
+ for _, chunk := range eiErr.ECInfo().Chunks {
+ var objID oid.ID
+ err := objID.ReadFromV2(chunk.ID)
+ if err != nil {
+ e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
+ return nil, false
+ }
+ eclocked = append(eclocked, objID)
+ }
+ return eclocked, true
+}
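
Note the migration from value to pointer status errors above: `apistatus.LockNonRegularObject{}` becomes `new(apistatus.LockNonRegularObject)`. With pointer-typed errors, the errors.As target must itself be a pointer to the pointer type, as in this small illustration:

// With status errors created via new(...), matching requires a *T variable
// whose address is passed to errors.As.
var errIrregular *apistatus.LockNonRegularObject
if errors.As(err, &errIrregular) {
	// err wraps *apistatus.LockNonRegularObject; handle the irregular-object case
}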
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 4d3ade8eeb..b8c9d6b1d6 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -2,21 +2,20 @@ package engine
import (
"context"
- "os"
"strconv"
"testing"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
@@ -31,6 +30,8 @@ func (t tss) IsTombstoneAvailable(ctx context.Context, _ oid.Address, epoch uint
}
func TestLockUserScenario(t *testing.T) {
+ t.Parallel()
+
// Tested user actions:
// 1. stores some object
// 2. locks the object
@@ -41,24 +42,25 @@ func TestLockUserScenario(t *testing.T) {
const lockerExpiresAfter = 13
cnr := cidtest.ID()
- tombObj := generateObjectWithCID(t, cnr)
+ tombObj := testutil.GenerateObjectWithCID(cnr)
tombForLockID := oidtest.ID()
tombObj.SetID(tombForLockID)
- e := testEngineFromShardOpts(t, 2, []shard.Option{
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- require.NoError(t, err)
+ testEngine := testNewEngine(t).
+ setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
- return pool
- }),
- shard.WithTombstoneSource(tss{lockerExpiresAfter}),
- })
-
- t.Cleanup(func() {
- _ = e.Close()
- _ = os.RemoveAll(t.Name())
- })
+ return pool
+ }),
+ shard.WithTombstoneSource(tss{lockerExpiresAfter}),
+ }
+ }).
+ prepare(t)
+ e := testEngine.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
lockerID := oidtest.ID()
tombID := oidtest.ID()
@@ -75,11 +77,11 @@ func TestLockUserScenario(t *testing.T) {
lockerAddr.SetContainer(cnr)
lockerAddr.SetObject(lockerID)
- var a object.Attribute
+ var a objectSDK.Attribute
a.SetKey(objectV2.SysAttributeExpEpoch)
a.SetValue(strconv.Itoa(lockerExpiresAfter))
- lockerObj := generateObjectWithCID(t, cnr)
+ lockerObj := testutil.GenerateObjectWithCID(cnr)
lockerObj.SetID(lockerID)
lockerObj.SetAttributes(a)
@@ -88,77 +90,80 @@ func TestLockUserScenario(t *testing.T) {
tombForLockAddr.SetObject(tombForLockID)
// 1.
- obj := generateObjectWithCID(t, cnr)
+ obj := testutil.GenerateObjectWithCID(cnr)
id, _ := obj.ID()
objAddr.SetObject(id)
- err = Put(e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
- var locker object.Lock
+ var locker objectSDK.Lock
locker.WriteMembers([]oid.ID{id})
- object.WriteLock(lockerObj, locker)
+ objectSDK.WriteLock(lockerObj, locker)
- err = Put(e, lockerObj)
+ err = Put(context.Background(), e, lockerObj, false)
require.NoError(t, err)
- err = e.Lock(cnr, lockerID, []oid.ID{id})
+ err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
require.NoError(t, err)
// 3.
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombAddr, objAddr)
- _, err = e.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ var objLockedErr *apistatus.ObjectLocked
+ err = e.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
// 4.
- tombObj.SetType(object.TypeTombstone)
+ tombObj.SetType(objectSDK.TypeTombstone)
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
- err = Put(e, tombObj)
+ err = Put(context.Background(), e, tombObj, false)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
- _, err = e.Inhume(inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.ErrorIs(t, err, meta.ErrLockObjectRemoval)
// 5.
- e.HandleNewEpoch(lockerExpiresAfter + 1)
-
- // delay for GC
- time.Sleep(time.Second)
+ e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
inhumePrm.WithTarget(tombAddr, objAddr)
- _, err = e.Inhume(inhumePrm)
- require.NoError(t, err)
+ require.Eventually(t, func() bool {
+ err = e.Inhume(context.Background(), inhumePrm)
+ return err == nil
+ }, 30*time.Second, time.Second)
}
func TestLockExpiration(t *testing.T) {
+ t.Parallel()
+
// Tested scenario:
// 1. some object is stored
// 2. lock object for it is stored, and the object is locked
// 3. lock expiration epoch is coming
// 4. after some delay the object is not locked anymore
- e := testEngineFromShardOpts(t, 2, []shard.Option{
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- require.NoError(t, err)
+ testEngine := testNewEngine(t).
+ setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
- return pool
- }),
- })
-
- t.Cleanup(func() {
- _ = e.Close()
- _ = os.RemoveAll(t.Name())
- })
+ return pool
+ }),
+ }
+ }).
+ prepare(t)
+ e := testEngine.engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
const lockerExpiresAfter = 13
@@ -166,50 +171,55 @@ func TestLockExpiration(t *testing.T) {
var err error
// 1.
- obj := generateObjectWithCID(t, cnr)
+ obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
- var a object.Attribute
+ var a objectSDK.Attribute
a.SetKey(objectV2.SysAttributeExpEpoch)
a.SetValue(strconv.Itoa(lockerExpiresAfter))
- lock := generateObjectWithCID(t, cnr)
- lock.SetType(object.TypeLock)
+ lock := testutil.GenerateObjectWithCID(cnr)
+ lock.SetType(objectSDK.TypeLock)
lock.SetAttributes(a)
- err = Put(e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
- err = e.Lock(cnr, idLock, []oid.ID{id})
+ err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
var inhumePrm InhumePrm
- inhumePrm.WithTarget(objecttest.Address(), objectcore.AddressOf(obj))
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ var objLockedErr *apistatus.ObjectLocked
+ err = e.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
// 3.
- e.HandleNewEpoch(lockerExpiresAfter + 1)
-
- // delay for GC processing. It can't be estimated, but making it bigger
- // will slow down test
- time.Sleep(time.Second)
+ e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
// 4.
- inhumePrm.WithTarget(objecttest.Address(), objectcore.AddressOf(obj))
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
- require.NoError(t, err)
+ require.Eventually(t, func() bool {
+ err = e.Inhume(context.Background(), inhumePrm)
+ return err == nil
+ }, 30*time.Second, time.Second)
}
func TestLockForceRemoval(t *testing.T) {
+ t.Parallel()
+
// Tested scenario:
// 1. some object is stored
// 2. lock object for it is stored, and the object is locked
@@ -218,66 +228,119 @@ func TestLockForceRemoval(t *testing.T) {
// 5. the object is not locked anymore
var e *StorageEngine
- e = testEngineFromShardOpts(t, 2, []shard.Option{
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- require.NoError(t, err)
+ e = testNewEngine(t).
+ setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
- return pool
- }),
- shard.WithDeletedLockCallback(e.processDeletedLocks),
- })
-
- t.Cleanup(func() {
- _ = e.Close()
- _ = os.RemoveAll(t.Name())
- })
+ return pool
+ }),
+ shard.WithDeletedLockCallback(e.processDeletedLocks),
+ }
+ }).
+ prepare(t).engine
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
cnr := cidtest.ID()
var err error
// 1.
- obj := generateObjectWithCID(t, cnr)
+ obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
- lock := generateObjectWithCID(t, cnr)
- lock.SetType(object.TypeLock)
+ lock := testutil.GenerateObjectWithCID(cnr)
+ lock.SetType(objectSDK.TypeLock)
- err = Put(e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
- err = e.Lock(cnr, idLock, []oid.ID{id})
+ err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
// 3.
var inhumePrm InhumePrm
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ var objLockedErr *apistatus.ObjectLocked
+ err = e.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.WithTarget(objecttest.Address(), objectcore.AddressOf(obj))
+ inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ err = e.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
// 4.
var deletePrm DeletePrm
deletePrm.WithAddress(objectcore.AddressOf(lock))
deletePrm.WithForceRemoval()
- _, err = e.Delete(deletePrm)
- require.NoError(t, err)
+ require.NoError(t, e.Delete(context.Background(), deletePrm))
// 5.
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
+ err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
}
+
+func TestLockExpiredRegularObject(t *testing.T) {
+ const currEpoch = 42
+ const objectExpiresAfter = currEpoch - 1
+
+ engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+ return []shard.Option{
+ shard.WithDisabledGC(),
+ shard.WithMetaBaseOptions(append(
+ testGetDefaultMetabaseOptions(t),
+ meta.WithEpochState(epochState{currEpoch}),
+ )...),
+ }
+ }).prepare(t).engine
+
+ cnr := cidtest.ID()
+
+ object := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
+
+ address := objectcore.AddressOf(object)
+
+ var putPrm PutPrm
+ putPrm.Object = object
+ require.NoError(t, engine.Put(context.Background(), putPrm))
+
+ var getPrm GetPrm
+ var errNotFound *apistatus.ObjectNotFound
+
+ getPrm.WithAddress(address)
+ _, err := engine.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, &errNotFound)
+
+ t.Run("lock expired regular object", func(t *testing.T) {
+ engine.Lock(context.Background(),
+ address.Container(),
+ oidtest.ID(),
+ []oid.ID{address.Object()},
+ )
+
+ res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object))
+ require.NoError(t, err)
+ require.True(t, res)
+ })
+
+ t.Run("get expired and locked regular object", func(t *testing.T) {
+ getPrm.WithAddress(objectcore.AddressOf(object))
+
+ res, err := engine.Get(context.Background(), getPrm)
+ require.NoError(t, err)
+ require.Equal(t, res.Object(), object)
+ })
+}
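
The fixed time.Sleep waits for GC were a source of flakiness: GC latency cannot be estimated, so a sleep is either too short or slows down every run. The tests now poll instead; the idiom, as used above:

// Retry the operation until GC has caught up or the 30s deadline expires;
// the test fails only if the condition never becomes true.
require.Eventually(t, func() bool {
	return e.Inhume(context.Background(), inhumePrm) == nil
}, 30*time.Second, time.Second)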
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 13dcdfe020..963292d835 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -2,34 +2,92 @@ package engine
import (
"time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
-type MetricRegister interface {
- AddListContainersDuration(d time.Duration)
- AddEstimateContainerSizeDuration(d time.Duration)
- AddDeleteDuration(d time.Duration)
- AddExistsDuration(d time.Duration)
- AddGetDuration(d time.Duration)
- AddHeadDuration(d time.Duration)
- AddInhumeDuration(d time.Duration)
- AddPutDuration(d time.Duration)
- AddRangeDuration(d time.Duration)
- AddSearchDuration(d time.Duration)
- AddListObjectsDuration(d time.Duration)
+type (
+ MetricRegister = metrics.EngineMetrics
+ GCMetrics = metrics.GCMetrics
+ WriteCacheMetrics = metrics.WriteCacheMetrics
+ NullBool = metrics.NullBool
+)
- SetObjectCounter(shardID, objectType string, v uint64)
- AddToObjectCounter(shardID, objectType string, delta int)
-
- SetReadonly(shardID string, readonly bool)
-
- AddToContainerSize(cnrID string, size int64)
- AddToPayloadCounter(shardID string, size int64)
-}
-
-func elapsed(addFunc func(d time.Duration)) func() {
+func elapsed(method string, addFunc func(method string, d time.Duration)) func() {
t := time.Now()
return func() {
- addFunc(time.Since(t))
+ addFunc(method, time.Since(t))
}
}
+
+type gcMetrics struct {
+ storage metrics.GCMetrics
+ shardID string
+}
+
+func (m *gcMetrics) SetShardID(id string) {
+ m.shardID = id
+}
+
+func (m *gcMetrics) AddRunDuration(d time.Duration, success bool) {
+ m.storage.AddRunDuration(m.shardID, d, success)
+}
+
+func (m *gcMetrics) AddDeletedCount(deleted, failed uint64) {
+ m.storage.AddDeletedCount(m.shardID, deleted, failed)
+}
+
+func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success bool, objectType string) {
+ m.storage.AddExpiredObjectCollectionDuration(m.shardID, d, success, objectType)
+}
+
+func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) {
+ m.storage.AddInhumedObjectCount(m.shardID, count, objectType)
+}
+
+type (
+ noopMetrics struct{}
+ noopWriteCacheMetrics struct{}
+ noopGCMetrics struct{}
+)
+
+var (
+ _ MetricRegister = noopMetrics{}
+ _ WriteCacheMetrics = noopWriteCacheMetrics{}
+ _ GCMetrics = noopGCMetrics{}
+)
+
+func (noopMetrics) AddMethodDuration(string, time.Duration) {}
+func (noopMetrics) SetObjectCounter(string, string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, string, int) {}
+func (noopMetrics) SetMode(string, mode.Mode) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) DeleteContainerSize(string) {}
+func (noopMetrics) DeleteContainerCount(string) {}
+func (noopMetrics) AddToPayloadCounter(string, int64) {}
+func (noopMetrics) IncErrorCounter(string) {}
+func (noopMetrics) ClearErrorCounter(string) {}
+func (noopMetrics) DeleteShardMetrics(string) {}
+func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncContainerObjectCounter(string, string, string) {}
+func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string, string) {}
+func (noopMetrics) SetEvacuationInProgress(string, bool) {}
+func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} }
+func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} }
+
+func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {}
+func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetMode(string, string) {}
+func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {}
+func (noopWriteCacheMetrics) Close(string, string) {}
+
+func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {}
+func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {}
+func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
+func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {}
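
The per-method Add*Duration interface collapses into a single AddMethodDuration(method, d), and the reworked elapsed helper captures the start time immediately while recording on the deferred call. A usage sketch (exampleOp is a hypothetical method):

// elapsed runs at defer-evaluation time, so the timer starts here; the
// returned closure runs on function exit and records the full duration.
func (e *StorageEngine) exampleOp() {
	defer elapsed("ExampleOp", e.metrics.AddMethodDuration)()
	// ... method body; everything up to return is measured ...
}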
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 68a4467f25..10cf5ffd5d 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -1,33 +1,43 @@
package engine
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
+ Object *objectSDK.Object
+ IsIndexedContainer bool
}
-// PutRes groups the resulting values of Put operation.
-type PutRes struct{}
-
var errPutShard = errors.New("could not put object to any shard")
-// WithObject is a Put option to set object to save.
-//
-// Option is required.
-func (p *PutPrm) WithObject(obj *objectSDK.Object) {
- p.obj = obj
+type putToShardStatus byte
+
+const (
+ putToShardUnknown putToShardStatus = iota
+ putToShardSuccess
+ putToShardExists
+ putToShardRemoved
+)
+
+type putToShardRes struct {
+ status putToShardStatus
+ err error
}
// Put saves the object to local storage.
@@ -38,128 +48,139 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) {
// Returns an error if executions are blocked (see BlockExecution).
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
-func (e *StorageEngine) Put(prm PutPrm) (res PutRes, err error) {
+func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
+ trace.WithAttributes(
+ attribute.String("address", object.AddressOf(prm.Object).EncodeToString()),
+ ))
+ defer span.End()
+ defer elapsed("Put", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.put(prm)
+ err = e.put(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddPutDuration)()
- }
-
- addr := object.AddressOf(prm.obj)
+func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
+ addr := object.AddressOf(prm.Object)
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
- _, err := e.exists(addr)
+ var ecParent oid.Address
+ if prm.Object.ECHeader() != nil {
+ ecParent.SetObject(prm.Object.ECHeader().Parent())
+ ecParent.SetContainer(addr.Container())
+ }
+ var shPrm shard.ExistsPrm
+ shPrm.Address = addr
+ shPrm.ECParentAddress = ecParent
+ existed, locked, err := e.exists(ctx, shPrm)
if err != nil {
- return PutRes{}, err
+ return err
}
- finished := false
+ if !existed && locked {
+ lockers, err := e.GetLocks(ctx, ecParent)
+ if err != nil {
+ return err
+ }
+ for _, locker := range lockers {
+ err = e.lock(ctx, addr.Container(), locker, []oid.ID{addr.Object()})
+ if err != nil {
+ return err
+ }
+ }
+ }
- e.iterateOverSortedShards(addr, func(ind int, sh hashedShard) (stop bool) {
+ var shRes putToShardRes
+ if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
e.mtx.RLock()
- pool, ok := e.shardPools[sh.ID().String()]
+ _, ok := e.shards[sh.ID().String()]
e.mtx.RUnlock()
if !ok {
// Shard was concurrently removed, skip.
return false
}
-
- putDone, exists := e.putToShard(sh, ind, pool, addr, prm.obj)
- finished = putDone || exists
- return finished
- })
-
- if !finished {
- err = errPutShard
+ shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer)
+ return shRes.status != putToShardUnknown
+ }); err != nil {
+ return err
+ }
+ switch shRes.status {
+ case putToShardUnknown:
+ return errPutShard
+ case putToShardRemoved:
+ return shRes.err
+ case putToShardExists, putToShardSuccess:
+ return nil
+ default:
+ return errPutShard
}
-
- return PutRes{}, err
}
// putToShard puts object to sh.
-// First return value is true iff put has been successfully done.
-// Second return value is true iff object already exists.
-func (e *StorageEngine) putToShard(sh hashedShard, ind int, pool util.WorkerPool, addr oid.Address, obj *objectSDK.Object) (bool, bool) {
- var putSuccess, alreadyExists bool
+// Returns the resulting putToShardStatus and, when it must be propagated to the caller, the error itself.
+func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard,
+ addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool,
+) (res putToShardRes) {
+ var existPrm shard.ExistsPrm
+ existPrm.Address = addr
- exitCh := make(chan struct{})
-
- if err := pool.Submit(func() {
- defer close(exitCh)
-
- var existPrm shard.ExistsPrm
- existPrm.SetAddress(addr)
-
- exists, err := sh.Exists(existPrm)
- if err != nil {
- if shard.IsErrObjectExpired(err) {
- // object is already found but
- // expired => do nothing with it
- alreadyExists = true
- }
-
- return // this is not ErrAlreadyRemoved error so we can go to the next shard
+ exists, err := sh.Exists(ctx, existPrm)
+ if err != nil {
+ if shard.IsErrObjectExpired(err) {
+ // object is already found but
+ // expired => do nothing with it
+ res.status = putToShardExists
+ } else {
+ e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.Error(err))
}
- alreadyExists = exists.Exists()
- if alreadyExists {
- if ind != 0 {
- var toMoveItPrm shard.ToMoveItPrm
- toMoveItPrm.SetAddress(addr)
-
- _, err = sh.ToMoveIt(toMoveItPrm)
- if err != nil {
- e.log.Warn("could not mark object for shard relocation",
- zap.Stringer("shard", sh.ID()),
- zap.String("error", err.Error()),
- )
- }
- }
-
- return
- }
-
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
-
- _, err = sh.Put(putPrm)
- if err != nil {
- if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
- errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
- e.log.Warn("could not put object to shard",
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()))
- return
- }
-
- e.reportShardError(sh, "could not put object to shard", err)
- return
- }
-
- putSuccess = true
- }); err != nil {
- close(exitCh)
+ return // this is not an ErrAlreadyRemoved error, so we can go to the next shard
}
- <-exitCh
+ if exists.Exists() {
+ res.status = putToShardExists
+ return
+ }
- return putSuccess, alreadyExists
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ putPrm.SetIndexAttributes(isIndexedContainer)
+
+ _, err = sh.Put(ctx, putPrm)
+ if err != nil {
+ if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
+ errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
+ e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.Error(err))
+ return
+ }
+ if client.IsErrObjectAlreadyRemoved(err) {
+ e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.Error(err))
+ res.status = putToShardRemoved
+ res.err = err
+ return
+ }
+
+ e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr))
+ return
+ }
+
+ res.status = putToShardSuccess
+
+ return
}
// Put writes provided object to local storage.
-func Put(storage *StorageEngine, obj *objectSDK.Object) error {
- var putPrm PutPrm
- putPrm.WithObject(obj)
-
- _, err := storage.Put(putPrm)
-
- return err
+func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error {
+ return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer})
}
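
PutPrm now exposes its fields directly, PutRes is gone, and the per-shard worker-pool round trip (Submit plus exitCh) is replaced by a synchronous call whose outcome is the small putToShardStatus enum. Calling the new API is a one-liner (sketch, obj assumed to be a prepared object):

// Exported fields replace the WithObject setter; the only result is an error.
if err := e.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: false}); err != nil {
	// errPutShard, an already-removed status, or a blocked-execution error
}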
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index d6e6b08ff5..7ec4742d8c 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -1,14 +1,21 @@
package engine
import (
+ "context"
"errors"
+ "strconv"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -56,143 +63,174 @@ func (r RngRes) Object() *objectSDK.Object {
// Returns ErrRangeOutOfBounds if the requested object range is out of bounds.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) GetRange(prm RngPrm) (res RngRes, err error) {
+func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.String("offset", strconv.FormatUint(prm.off, 10)),
+ attribute.String("length", strconv.FormatUint(prm.ln, 10)),
+ ))
+ defer span.End()
+ defer elapsed("GetRange", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.getRange(prm)
+ res, err = e.getRange(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) getRange(prm RngPrm) (RngRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddRangeDuration)()
- }
-
- var (
- obj *objectSDK.Object
- siErr *objectSDK.SplitInfoError
-
- errNotFound apistatus.ObjectNotFound
-
- outSI *objectSDK.SplitInfo
- outError error = errNotFound
-
- shardWithMeta hashedShard
- metaError error
- )
-
- var hasDegraded bool
-
+func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) {
var shPrm shard.RngPrm
shPrm.SetAddress(prm.addr)
shPrm.SetRange(prm.off, prm.ln)
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- noMeta := sh.GetMode().NoMetabase()
- hasDegraded = hasDegraded || noMeta
- shPrm.SetIgnoreMeta(noMeta)
-
- res, err := sh.GetRange(shPrm)
- if err != nil {
- if res.HasMeta() {
- shardWithMeta = sh
- metaError = err
- }
- switch {
- case shard.IsErrNotFound(err):
- return false // ignore, go to next shard
- case errors.As(err, &siErr):
- if outSI == nil {
- outSI = objectSDK.NewSplitInfo()
- }
-
- util.MergeSplitInfo(siErr.SplitInfo(), outSI)
-
- _, withLink := outSI.Link()
- _, withLast := outSI.LastPart()
-
- // stop iterating over shards if SplitInfo structure is complete
- if withLink && withLast {
- return true
- }
-
- return false
- case
- shard.IsErrRemoved(err),
- shard.IsErrOutOfRange(err):
- outError = err
-
- return true // stop, return it back
- default:
- e.reportShardError(sh, "could not get object from shard", err)
- return false
- }
- }
-
- obj = res.Object()
-
- return true
- })
-
- if outSI != nil {
- return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI))
+ it := &getRangeShardIterator{
+ OutError: new(apistatus.ObjectNotFound),
+ ShardPrm: shPrm,
+ Address: prm.addr,
+ Engine: e,
}
- if obj == nil {
+ if err := it.tryGetWithMeta(ctx); err != nil {
+ return RngRes{}, err
+ }
+
+ if it.SplitInfo != nil {
+ return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
+ }
+ if it.ECInfo != nil {
+ return RngRes{}, logicerr.Wrap(objectSDK.NewECInfoError(it.ECInfo))
+ }
+
+ if it.Object == nil {
// If any shard is in a degraded mode, we should assume that metabase could store
// info about some object.
- if shardWithMeta.Shard == nil && !hasDegraded || !shard.IsErrNotFound(outError) {
- return RngRes{}, outError
+ if it.ShardWithMeta.Shard == nil && !it.HasDegraded || !client.IsErrObjectNotFound(it.OutError) {
+ return RngRes{}, it.OutError
}
- // If the object is not found but is present in metabase,
- // try to fetch it from blobstor directly. If it is found in any
- // blobstor, increase the error counter for the shard which contains the meta.
- shPrm.SetIgnoreMeta(true)
-
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- if sh.GetMode().NoMetabase() {
- // Already processed it without a metabase.
- return false
- }
-
- res, err := sh.GetRange(shPrm)
- if shard.IsErrOutOfRange(err) {
- var errOutOfRange apistatus.ObjectOutOfRange
-
- outError = errOutOfRange
- return true
- }
- obj = res.Object()
- return err == nil
- })
- if obj == nil {
- return RngRes{}, outError
+ if err := it.tryGetFromBlobstor(ctx); err != nil {
+ return RngRes{}, err
}
- if shardWithMeta.Shard != nil {
- e.reportShardError(shardWithMeta, "meta info was present, but object is missing",
- metaError,
+
+ if it.Object == nil {
+ return RngRes{}, it.OutError
+ }
+ if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
+ e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
+ zap.Stringer("shard_id", it.ShardWithMeta.ID()),
+ zap.Error(it.MetaError),
zap.Stringer("address", prm.addr))
}
}
return RngRes{
- obj: obj,
+ obj: it.Object,
}, nil
}
// GetRange reads object payload range from local storage by provided address.
-func GetRange(storage *StorageEngine, addr oid.Address, rng *objectSDK.Range) ([]byte, error) {
+func GetRange(ctx context.Context, storage *StorageEngine, addr oid.Address, rng *objectSDK.Range) ([]byte, error) {
var rangePrm RngPrm
rangePrm.WithAddress(addr)
rangePrm.WithPayloadRange(rng)
- res, err := storage.GetRange(rangePrm)
+ res, err := storage.GetRange(ctx, rangePrm)
if err != nil {
return nil, err
}
return res.Object().Payload(), nil
}
+
+type getRangeShardIterator struct {
+ Object *objectSDK.Object
+ SplitInfoError *objectSDK.SplitInfoError
+ SplitInfo *objectSDK.SplitInfo
+ ECInfoError *objectSDK.ECInfoError
+ ECInfo *objectSDK.ECInfo
+ OutError error
+ ShardWithMeta hashedShard
+ MetaError error
+ HasDegraded bool
+
+ ShardPrm shard.RngPrm
+ Address oid.Address
+ Engine *StorageEngine
+}
+
+func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error {
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ noMeta := sh.GetMode().NoMetabase()
+ i.HasDegraded = i.HasDegraded || noMeta
+ i.ShardPrm.SetIgnoreMeta(noMeta)
+
+ res, err := sh.GetRange(ctx, i.ShardPrm)
+ if err == nil {
+ i.Object = res.Object()
+ return true
+ }
+
+ if res.HasMeta() {
+ i.ShardWithMeta = sh
+ i.MetaError = err
+ }
+ switch {
+ case client.IsErrObjectNotFound(err):
+ return false // ignore, go to next shard
+ case errors.As(err, &i.SplitInfoError):
+ if i.SplitInfo == nil {
+ i.SplitInfo = objectSDK.NewSplitInfo()
+ }
+
+ util.MergeSplitInfo(i.SplitInfoError.SplitInfo(), i.SplitInfo)
+
+ _, withLink := i.SplitInfo.Link()
+ _, withLast := i.SplitInfo.LastPart()
+
+ // stop iterating over shards if SplitInfo structure is complete
+ return withLink && withLast
+ case errors.As(err, &i.ECInfoError):
+ if i.ECInfo == nil {
+ i.ECInfo = objectSDK.NewECInfo()
+ }
+
+ util.MergeECInfo(i.ECInfoError.ECInfo(), i.ECInfo)
+ // stop iterating over shards if ECInfo structure is complete
+ return len(i.ECInfo.Chunks) == int(i.ECInfo.Chunks[0].Total)
+ case
+ client.IsErrObjectAlreadyRemoved(err),
+ shard.IsErrOutOfRange(err):
+ i.OutError = err
+
+ return true // stop, return it back
+ default:
+ i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ return false
+ }
+ })
+}
+
+func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error {
+ // If the object is not found but is present in metabase,
+ // try to fetch it from blobstor directly. If it is found in any
+ // blobstor, increase the error counter for the shard which contains the meta.
+ i.ShardPrm.SetIgnoreMeta(true)
+
+ return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ if sh.GetMode().NoMetabase() {
+ // Already processed it without a metabase.
+ return false
+ }
+
+ res, err := sh.GetRange(ctx, i.ShardPrm)
+ if shard.IsErrOutOfRange(err) {
+ i.OutError = new(apistatus.ObjectOutOfRange)
+ return true
+ }
+ i.Object = res.Object()
+ return err == nil
+ })
+}
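
getRange's two shard passes used to share state through closure-captured locals; they are now methods on getRangeShardIterator, which makes the accumulated state explicit and reusable between passes. The calling shape, reduced to its skeleton:

// Accumulated results live on the struct, each pass over the shards is a
// method, and the caller inspects the fields afterwards.
it := &getRangeShardIterator{
	OutError: new(apistatus.ObjectNotFound), // default when nothing is found
	ShardPrm: shPrm,
	Address:  prm.addr,
	Engine:   e,
}
if err := it.tryGetWithMeta(ctx); err != nil {
	return RngRes{}, err
}
// it.Object, it.SplitInfo, it.ECInfo and it.OutError now describe the outcome.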
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
new file mode 100644
index 0000000000..a29dd7ed90
--- /dev/null
+++ b/pkg/local_object_storage/engine/rebuild.go
@@ -0,0 +1,108 @@
+package engine
+
+import (
+ "context"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "golang.org/x/sync/errgroup"
+)
+
+type RebuildPrm struct {
+ ShardIDs []*shard.ID
+ ConcurrencyLimit uint32
+ TargetFillPercent uint32
+}
+
+type ShardRebuildResult struct {
+ ShardID *shard.ID
+ Success bool
+ ErrorMsg string
+}
+
+type RebuildRes struct {
+ ShardResults []ShardRebuildResult
+}
+
+func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Rebuild",
+ trace.WithAttributes(
+ attribute.Int("shard_id_count", len(prm.ShardIDs)),
+ attribute.Int64("target_fill_percent", int64(prm.TargetFillPercent)),
+ attribute.Int64("concurrency_limit", int64(prm.ConcurrencyLimit)),
+ ))
+ defer span.End()
+
+ res := RebuildRes{
+ ShardResults: make([]ShardRebuildResult, 0, len(prm.ShardIDs)),
+ }
+ resGuard := &sync.Mutex{}
+
+ concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)}
+
+ eg, egCtx := errgroup.WithContext(ctx)
+ for _, shardID := range prm.ShardIDs {
+ eg.Go(func() error {
+ e.mtx.RLock()
+ sh, ok := e.shards[shardID.String()]
+ e.mtx.RUnlock()
+
+ if !ok {
+ resGuard.Lock()
+ defer resGuard.Unlock()
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ ErrorMsg: errShardNotFound.Error(),
+ })
+ return nil
+ }
+
+ err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{
+ ConcurrencyLimiter: concLimiter,
+ TargetFillPercent: prm.TargetFillPercent,
+ })
+
+ resGuard.Lock()
+ defer resGuard.Unlock()
+
+ if err != nil {
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ ErrorMsg: err.Error(),
+ })
+ } else {
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ Success: true,
+ })
+ }
+ return nil
+ })
+ }
+
+ if err := eg.Wait(); err != nil {
+ return RebuildRes{}, err
+ }
+ return res, nil
+}
+
+type concurrencyLimiter struct {
+ semaphore chan struct{}
+}
+
+func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
+ select {
+ case l.semaphore <- struct{}{}:
+ return l.releaseWorkSlot, nil
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+func (l *concurrencyLimiter) releaseWorkSlot() {
+ <-l.semaphore
+}
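
The channel-backed semaphore above bounds rebuild concurrency across all shards, and acquisition is context-aware, so a cancelled caller does not keep waiting for a slot. A usage sketch (limiter is a *concurrencyLimiter):

// Acquire a slot (or give up when ctx is done), then release when finished.
release, err := limiter.AcquireWorkSlot(ctx)
if err != nil {
	return err // ctx was cancelled while waiting
}
defer release()
// ... do one unit of bounded rebuild work ...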
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
new file mode 100644
index 0000000000..8ab3c5217f
--- /dev/null
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -0,0 +1,139 @@
+package engine
+
+import (
+ "context"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/hrw"
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+// errRemoveDuplicatesInProgress is returned when another rebalancing is in progress.
+// We need it because `Rebalance` removes objects, and running two of these
+// routines concurrently can lead to data loss. In the future this restriction may be relaxed.
+var errRemoveDuplicatesInProgress = errors.New("redundant copies removal is already in progress")
+
+const defaultRemoveDuplicatesConcurrency = 256
+
+type RemoveDuplicatesPrm struct {
+ Concurrency int
+}
+
+// RemoveDuplicates iterates over all objects and removes duplicate object copies
+// from shards which are worse as defined by HRW sort.
+// Safety:
+// 1. Concurrent execution is prohibited, so one object copy should always be left.
+// 2. If another thread deletes an object, this is not a problem. Currently,
+// we have two threads that can remove "valid" (non-expired and logically non-removed) objects:
+// the policer and rebalance. For rebalance, see (1).
+// If the policer removes something, we do not care whether both copies are removed or one is left,
+// as the remaining copy will be removed during the next policer iteration.
+func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicatesPrm) error {
+ if !e.removeDuplicatesInProgress.CompareAndSwap(false, true) {
+ return errRemoveDuplicatesInProgress
+ }
+ defer e.removeDuplicatesInProgress.Store(false)
+
+ if prm.Concurrency <= 0 {
+ prm.Concurrency = defaultRemoveDuplicatesConcurrency
+ }
+
+ e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies,
+ zap.Int("concurrency", prm.Concurrency))
+
+ // The mutex must be held for the whole duration to avoid the target shard being removed
+ // concurrently: that could lead to data loss.
+ e.mtx.RLock()
+ defer e.mtx.RUnlock()
+
+ // Iterate shard by shard to be sure that no objects from two different shards are removed simultaneously.
+ // This cannot currently happen, because the `FreeSpace` metric used by weight sorting is always 0.
+ // However, we could change the weights in the future and easily forget about this function.
+ for _, sh := range e.shards {
+ e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
+ ch := make(chan oid.Address)
+
+ errG, ctx := errgroup.WithContext(ctx)
+ errG.SetLimit(prm.Concurrency + 1) // +1 for the listing thread
+
+ errG.Go(func() error {
+ defer close(ch)
+
+ var cursor *meta.Cursor
+ for {
+ var listPrm shard.ListWithCursorPrm
+ listPrm.WithCount(uint32(prm.Concurrency))
+ listPrm.WithCursor(cursor)
+ res, err := sh.ListWithCursor(ctx, listPrm)
+ if err != nil {
+ if errors.Is(err, meta.ErrEndOfListing) {
+ return nil
+ }
+ return err
+ }
+ for _, addr := range res.AddressList() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case ch <- addr.Address:
+ }
+ }
+ cursor = res.Cursor()
+ }
+ })
+
+ for range prm.Concurrency {
+ errG.Go(func() error {
+ return e.removeObjects(ctx, ch)
+ })
+ }
+ if err := errG.Wait(); err != nil {
+ e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
+ return err
+ }
+ }
+
+ e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies)
+ return nil
+}
+
+// removeObjects reads addresses from ch and removes every copy except the one on the best shard in HRW order.
+func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address) error {
+ shards := make([]hashedShard, 0, len(e.shards))
+ for _, sh := range e.shards {
+ shards = append(shards, sh)
+ }
+
+ for addr := range ch {
+ h := hrw.StringHash(addr.EncodeToString())
+ hrw.SortHasherSliceByValue(shards, h)
+ found := false
+ for i := range shards {
+ var existsPrm shard.ExistsPrm
+ existsPrm.Address = addr
+
+ res, err := shards[i].Exists(ctx, existsPrm)
+ if err != nil {
+ return err
+ } else if !res.Exists() {
+ continue
+ } else if !found {
+ found = true
+ continue
+ }
+
+ var deletePrm shard.DeletePrm
+ deletePrm.SetAddresses(addr)
+ _, err = shards[i].Delete(ctx, deletePrm)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
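
removeObjects keeps whichever copy lives on the shard ranked first by HRW for the address and deletes the rest. A condensed sketch of that rule, with exists and remove standing in for the shard Exists/Delete calls:

// keepFirstByHRW deletes every copy except the one on the best-ranked shard.
func keepFirstByHRW(addr oid.Address, shards []hashedShard,
	exists func(hashedShard) bool, remove func(hashedShard),
) {
	hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
	kept := false
	for _, sh := range shards {
		switch {
		case !exists(sh):
			continue // no copy on this shard
		case !kept:
			kept = true // the best existing copy survives
		default:
			remove(sh) // every later copy is redundant
		}
	}
}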
diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go
new file mode 100644
index 0000000000..6d2291c744
--- /dev/null
+++ b/pkg/local_object_storage/engine/remove_copies_test.go
@@ -0,0 +1,221 @@
+package engine
+
+import (
+ "context"
+ "sync"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRebalance(t *testing.T) {
+ t.Parallel()
+
+ te := newEngineWithErrorThreshold(t, "", 0)
+ defer func() {
+ require.NoError(t, te.ng.Close(context.Background()))
+ }()
+
+ const (
+ objCount = 20
+ copyCount = (objCount + 2) / 3
+ )
+
+ type objectWithShard struct {
+ bestShard shard.ID
+ worstShard shard.ID
+ object *objectSDK.Object
+ }
+
+ objects := make([]objectWithShard, objCount)
+ for i := range objects {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ obj.SetPayload(make([]byte, errSmallSize))
+ objects[i].object = obj
+
+ shards := te.ng.sortShards(object.AddressOf(obj))
+ objects[i].bestShard = *shards[0].Shard.ID()
+ objects[i].worstShard = *shards[1].Shard.ID()
+ }
+
+ for i := range objects {
+ var prm shard.PutPrm
+ prm.SetObject(objects[i].object)
+
+ var err1, err2 error
+ te.ng.mtx.RLock()
+ // Every 3rd object (i%3 == 0) is put to both shards, others are distributed.
+ if i%3 != 1 {
+ _, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
+ }
+ if i%3 != 2 {
+ _, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
+ }
+ te.ng.mtx.RUnlock()
+
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+ }
+
+ var removedMtx sync.Mutex
+ var removed []deleteEvent
+ for _, shard := range te.shards {
+ id := *shard.id
+ shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) {
+ removedMtx.Lock()
+ removed = append(removed, deleteEvent{shardID: id, addr: prm.Address})
+ removedMtx.Unlock()
+ return common.DeleteRes{}, nil
+ }))
+ }
+
+ err := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
+ require.NoError(t, err)
+
+ require.Equal(t, copyCount, len(removed))
+
+ removedMask := make([]bool, len(objects))
+loop:
+ for i := range removed {
+ for j := range objects {
+ if removed[i].addr == object.AddressOf(objects[j].object) {
+ require.Equal(t, objects[j].worstShard, removed[i].shardID,
+ "object %d was expected to be removed from another shard", j)
+ removedMask[j] = true
+ continue loop
+ }
+ }
+ require.FailNow(t, "unexpected object was removed", removed[i].addr)
+ }
+
+ for i := range copyCount {
+ if i%3 == 0 {
+ require.True(t, removedMask[i], "object %d was expected to be removed", i)
+ } else {
+ require.False(t, removedMask[i], "object %d was not expected to be removed", i)
+ }
+ }
+}
+
+func TestRebalanceSingleThread(t *testing.T) {
+ t.Parallel()
+
+ te := newEngineWithErrorThreshold(t, "", 0)
+ defer func() {
+ require.NoError(t, te.ng.Close(context.Background()))
+ }()
+
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ obj.SetPayload(make([]byte, errSmallSize))
+
+ var prm shard.PutPrm
+ prm.SetObject(obj)
+ te.ng.mtx.RLock()
+ _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
+ _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
+ te.ng.mtx.RUnlock()
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+
+ signal := make(chan struct{}) // unblock rebalance
+ started := make(chan struct{}) // make sure rebalance is started
+ for _, shard := range te.shards {
+ shard.largeFileStorage.SetOption(teststore.WithDelete(func(common.DeletePrm) (common.DeleteRes, error) {
+ close(started)
+ <-signal
+ return common.DeleteRes{}, nil
+ }))
+ }
+
+ var firstErr error
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ firstErr = te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
+ }()
+
+ <-started
+ secondErr := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
+ require.ErrorIs(t, secondErr, errRemoveDuplicatesInProgress)
+
+ close(signal)
+ wg.Wait()
+ require.NoError(t, firstErr)
+}
+
+type deleteEvent struct {
+ shardID shard.ID
+ addr oid.Address
+}
+
+func TestRebalanceExitByContext(t *testing.T) {
+ te := newEngineWithErrorThreshold(t, "", 0)
+ defer func() {
+ require.NoError(t, te.ng.Close(context.Background()))
+ }()
+
+ objects := make([]*objectSDK.Object, 4)
+ for i := range objects {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ obj.SetPayload(make([]byte, errSmallSize))
+ objects[i] = obj
+ }
+
+ for i := range objects {
+ var prm shard.PutPrm
+ prm.SetObject(objects[i])
+
+ te.ng.mtx.RLock()
+ _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
+ _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
+ te.ng.mtx.RUnlock()
+
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+ }
+
+ var removed []deleteEvent
+ deleteCh := make(chan struct{})
+ signal := make(chan struct{})
+ for _, shard := range te.shards {
+ id := *shard.id
+ shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) {
+ deleteCh <- struct{}{}
+ <-signal
+ removed = append(removed, deleteEvent{shardID: id, addr: prm.Address})
+ return common.DeleteRes{}, nil
+ }))
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ var rebalanceErr error
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ rebalanceErr = te.ng.RemoveDuplicates(ctx, RemoveDuplicatesPrm{Concurrency: 1})
+ }()
+
+ const removeCount = 3
+ for range removeCount - 1 {
+ <-deleteCh
+ signal <- struct{}{}
+ }
+ <-deleteCh
+ cancel()
+ close(signal)
+
+ wg.Wait()
+ require.ErrorIs(t, rebalanceErr, context.Canceled)
+ require.Equal(t, removeCount, len(removed))
+}
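
These tests observe and gate physical deletions by swapping the blobstor's Delete implementation through teststore. The hook pattern, reduced to its core (largeFileStorage is the teststore instance from the test engine above):

// Intercept deletes: record the event, optionally block on a channel to
// control test timing, and return success without touching real storage.
largeFileStorage.SetOption(teststore.WithDelete(
	func(prm common.DeletePrm) (common.DeleteRes, error) {
		// record prm.Address or synchronize with the test here
		return common.DeleteRes{}, nil
	}))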
diff --git a/pkg/local_object_storage/engine/restore.go b/pkg/local_object_storage/engine/restore.go
deleted file mode 100644
index 84c750cd0d..0000000000
--- a/pkg/local_object_storage/engine/restore.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package engine
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-
-// RestoreShard restores objects from dump to the shard with provided identifier.
-//
-// Returns an error if shard is not read-only.
-func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error {
- e.mtx.RLock()
- defer e.mtx.RUnlock()
-
- sh, ok := e.shards[id.String()]
- if !ok {
- return errShardNotFound
- }
-
- _, err := sh.Restore(prm)
- return err
-}
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 7b9b8be606..4243a54819 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -1,16 +1,22 @@
package engine
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters object.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ indexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -19,12 +25,13 @@ type SelectRes struct {
}
// WithContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) WithContainerID(cnr cid.ID) {
+func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) {
p.cnr = cnr
+ p.indexedContainer = indexedContainer
}
// WithFilters is a Select option to set the object filters.
-func (p *SelectPrm) WithFilters(fs object.SearchFilters) {
+func (p *SelectPrm) WithFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
@@ -38,33 +45,35 @@ func (r SelectRes) AddressList() []oid.Address {
// Returns any error encountered that did not allow to completely select the objects.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Select(prm SelectPrm) (res SelectRes, err error) {
+func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Select",
+ trace.WithAttributes(
+ attribute.String("container_id", prm.cnr.EncodeToString()),
+ ))
+ defer span.End()
+ defer elapsed("Select", e.metrics.AddMethodDuration)()
+
err = e.execIfNotBlocked(func() error {
- res, err = e._select(prm)
- return err
+ var sErr error
+ res, sErr = e._select(ctx, prm)
+ return sErr
})
return
}
-func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddSearchDuration)()
- }
-
+func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
addrList := make([]oid.Address, 0)
uniqueMap := make(map[string]struct{})
- var outError error
-
var shPrm shard.SelectPrm
- shPrm.SetContainerID(prm.cnr)
+ shPrm.SetContainerID(prm.cnr, prm.indexedContainer)
shPrm.SetFilters(prm.filters)
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- res, err := sh.Select(shPrm)
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ res, err := sh.Select(ctx, shPrm)
if err != nil {
- e.reportShardError(sh, "could not select objects from shard", err)
+ e.reportShardError(ctx, sh, "could not select objects from shard", err)
return false
}
@@ -76,40 +85,40 @@ func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
}
return false
- })
+ }); err != nil {
+ return SelectRes{}, err
+ }
return SelectRes{
addrList: addrList,
- }, outError
+ }, nil
}
// List returns `limit` available physically stored object addresses in engine.
// If limit is zero, then returns all available object addresses.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) List(limit uint64) (res SelectRes, err error) {
+func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) {
+ defer elapsed("List", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
- res, err = e.list(limit)
- return err
+ var lErr error
+ res, lErr = e.list(ctx, limit)
+ return lErr
})
return
}
-func (e *StorageEngine) list(limit uint64) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed(e.metrics.AddListObjectsDuration)()
- }
-
+func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) {
addrList := make([]oid.Address, 0, limit)
uniqueMap := make(map[string]struct{})
ln := uint64(0)
// consider iterating over shuffled shards
- e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- res, err := sh.List() // consider limit result of shard iterator
+ if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ res, err := sh.List(ctx) // consider limit result of shard iterator
if err != nil {
- e.reportShardError(sh, "could not select objects from shard", err)
+ e.reportShardError(ctx, sh, "could not select objects from shard", err)
} else {
for _, addr := range res.AddressList() { // save only unique values
if _, ok := uniqueMap[addr.EncodeToString()]; !ok {
@@ -125,7 +134,9 @@ func (e *StorageEngine) list(limit uint64) (SelectRes, error) {
}
return false
- })
+ }); err != nil {
+ return SelectRes{}, err
+ }
return SelectRes{
addrList: addrList,
@@ -133,12 +144,12 @@ func (e *StorageEngine) list(limit uint64) (SelectRes, error) {
}
// Select selects objects from local storage using provided filters.
-func Select(storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
+func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
- selectPrm.WithContainerID(cnr)
+ selectPrm.WithContainerID(cnr, isIndexedContainer)
selectPrm.WithFilters(fs)
- res, err := storage.Select(selectPrm)
+ res, err := storage.Select(ctx, selectPrm)
if err != nil {
return nil, err
}
@@ -148,8 +159,8 @@ func Select(storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.
// List returns `limit` available physically stored object addresses in
// engine. If limit is zero, then returns all available object addresses.
-func List(storage *StorageEngine, limit uint64) ([]oid.Address, error) {
- res, err := storage.List(limit)
+func List(ctx context.Context, storage *StorageEngine, limit uint64) ([]oid.Address, error) {
+ res, err := storage.List(ctx, limit)
if err != nil {
return nil, err
}
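
A hedged usage sketch for the context-aware Select API above. The engine value and container ID are assumed to exist, the attribute name is illustrative, and the meaning of the second WithContainerID argument is inferred from its name; only calls visible in this diff are used.

func selectByFilename(ctx context.Context, e *StorageEngine, cnr cid.ID, name string) ([]oid.Address, error) {
	var fs objectSDK.SearchFilters
	fs.AddFilter("FileName", name, objectSDK.MatchStringEqual)

	var prm SelectPrm
	prm.WithContainerID(cnr, true) // true: container attributes are indexed (assumption)
	prm.WithFilters(fs)

	res, err := e.Select(ctx, prm)
	if err != nil {
		return nil, err
	}
	return res.AddressList(), nil
}
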
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 34210d8354..69067c500b 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -1,17 +1,24 @@
package engine
import (
+ "context"
+ "errors"
"fmt"
+ "sync"
+ "sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/google/uuid"
- "github.com/panjf2000/ants/v2"
- "go.uber.org/atomic"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
)
var errShardNotFound = logicerr.New("shard not found")
@@ -44,12 +51,8 @@ func (m *metricsWithID) IncObjectCounter(objectType string) {
m.mw.AddToObjectCounter(m.id, objectType, +1)
}
-func (m *metricsWithID) DecObjectCounter(objectType string) {
- m.mw.AddToObjectCounter(m.id, objectType, -1)
-}
-
-func (m *metricsWithID) SetReadonly(readonly bool) {
- m.mw.SetReadonly(m.id, readonly)
+func (m *metricsWithID) SetMode(mode mode.Mode) {
+ m.mw.SetMode(m.id, mode)
}
func (m *metricsWithID) AddToContainerSize(cnr string, size int64) {
@@ -60,71 +63,123 @@ func (m *metricsWithID) AddToPayloadSize(size int64) {
m.mw.AddToPayloadCounter(m.id, size)
}
+func (m *metricsWithID) IncErrorCounter() {
+ m.mw.IncErrorCounter(m.id)
+}
+
+func (m *metricsWithID) ClearErrorCounter() {
+ m.mw.ClearErrorCounter(m.id)
+}
+
+func (m *metricsWithID) DeleteShardMetrics() {
+ m.mw.DeleteShardMetrics(m.id)
+}
+
+func (m *metricsWithID) SetContainerObjectsCount(cnrID string, objectType string, value uint64) {
+ m.mw.SetContainerObjectCounter(m.id, cnrID, objectType, value)
+}
+
+func (m *metricsWithID) IncContainerObjectsCount(cnrID string, objectType string) {
+ m.mw.IncContainerObjectCounter(m.id, cnrID, objectType)
+}
+
+func (m *metricsWithID) SubContainerObjectsCount(cnrID string, objectType string, value uint64) {
+ m.mw.SubContainerObjectCounter(m.id, cnrID, objectType, value)
+}
+
+func (m *metricsWithID) IncRefillObjectsCount(path string, size int, success bool) {
+ m.mw.IncRefillObjectsCount(m.id, path, size, success)
+}
+
+func (m *metricsWithID) SetRefillPercent(path string, percent uint32) {
+ m.mw.SetRefillPercent(m.id, path, percent)
+}
+
+func (m *metricsWithID) SetRefillStatus(path string, status string) {
+ m.mw.SetRefillStatus(m.id, path, status)
+}
+
+func (m *metricsWithID) SetEvacuationInProgress(value bool) {
+ m.mw.SetEvacuationInProgress(m.id, value)
+}
+
// AddShard adds a new shard to the storage engine.
//
// Returns any error encountered that did not allow adding a shard.
// Otherwise returns the ID of the added shard.
-func (e *StorageEngine) AddShard(opts ...shard.Option) (*shard.ID, error) {
- sh, err := e.createShard(opts)
+func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) {
+ sh, err := e.createShard(ctx, opts)
if err != nil {
- return nil, fmt.Errorf("could not create a shard: %w", err)
+ return nil, fmt.Errorf("create a shard: %w", err)
}
err = e.addShard(sh)
if err != nil {
- return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
+ return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
}
- if e.cfg.metrics != nil {
- e.cfg.metrics.SetReadonly(sh.ID().String(), sh.GetMode() != mode.ReadWrite)
- }
+ e.metrics.SetMode(sh.ID().String(), sh.GetMode())
return sh.ID(), nil
}
-func (e *StorageEngine) createShard(opts []shard.Option) (*shard.Shard, error) {
+func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) {
id, err := generateShardID()
if err != nil {
- return nil, fmt.Errorf("could not generate shard ID: %w", err)
+ return nil, fmt.Errorf("generate shard ID: %w", err)
}
- e.mtx.RLock()
-
- if e.metrics != nil {
- opts = append(opts, shard.WithMetricsWriter(
- &metricsWithID{
- id: id.String(),
- mw: e.metrics,
- },
- ))
- }
-
- e.mtx.RUnlock()
+ opts = e.appendMetrics(id, opts)
sh := shard.New(append(opts,
shard.WithID(id),
shard.WithExpiredTombstonesCallback(e.processExpiredTombstones),
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
- shard.WithReportErrorFunc(e.reportShardErrorBackground),
+ shard.WithReportErrorFunc(e.reportShardErrorByID),
+ shard.WithZeroSizeCallback(e.processZeroSizeContainers),
+ shard.WithZeroCountCallback(e.processZeroCountContainers),
)...)
- if err := sh.UpdateID(); err != nil {
- return nil, fmt.Errorf("could not update shard ID: %w", err)
+ if err := sh.UpdateID(ctx); err != nil {
+ e.log.Warn(ctx, logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
}
- return sh, err
+ return sh, nil
+}
+
+func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard.Option {
+ e.mtx.RLock()
+ defer e.mtx.RUnlock()
+
+ opts = append(opts,
+ shard.WithMetricsWriter(
+ &metricsWithID{
+ id: id.String(),
+ mw: e.metrics,
+ },
+ ),
+ shard.WithWriteCacheMetrics(
+ &writeCacheMetrics{
+ shardID: id.String(),
+ metrics: e.metrics.WriteCache(),
+ },
+ ),
+ shard.WithGCMetrics(
+ &gcMetrics{
+ storage: e.metrics.GC(),
+ shardID: id.String(),
+ },
+ ),
+ )
+
+ return opts
}
func (e *StorageEngine) addShard(sh *shard.Shard) error {
e.mtx.Lock()
defer e.mtx.Unlock()
- pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
- if err != nil {
- return fmt.Errorf("could not create pool: %w", err)
- }
-
strID := sh.ID().String()
if _, ok := e.shards[strID]; ok {
return fmt.Errorf("shard with id %s was already added", strID)
@@ -132,20 +187,18 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
e.shards[strID] = hashedShard{
shardWrapper: shardWrapper{
- errorCount: atomic.NewUint32(0),
+ errorCount: new(atomic.Uint32),
Shard: sh,
},
- hash: hrw.Hash([]byte(strID)),
+ hash: hrw.StringHash(strID),
}
- e.shardPools[strID] = pool
-
return nil
}
// removeShards removes specified shards. Skips non-existent shards.
// Logs errors about shards that it could not Close after the removal.
-func (e *StorageEngine) removeShards(ids ...string) {
+func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) {
if len(ids) == 0 {
return
}
@@ -159,24 +212,27 @@ func (e *StorageEngine) removeShards(ids ...string) {
continue
}
+ e.metrics.DeleteShardMetrics(id)
+
ss = append(ss, sh)
delete(e.shards, id)
- pool, ok := e.shardPools[id]
- if ok {
- pool.Release()
- delete(e.shardPools, id)
- }
-
- e.log.Info("shard has been removed",
+ e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
zap.String("id", id))
}
e.mtx.Unlock()
for _, sh := range ss {
- err := sh.Close()
+ err := sh.SetMode(ctx, mode.Disabled)
if err != nil {
- e.log.Error("could not close removed shard",
+ e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
+ zap.Stringer("id", sh.ID()),
+ zap.Error(err),
+ )
+ }
+ err = sh.Close(ctx)
+ if err != nil {
+ e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
@@ -198,26 +254,16 @@ func generateShardID() (*shard.ID, error) {
return shard.NewIDFromBytes(bin), nil
}
-func (e *StorageEngine) shardWeight(sh *shard.Shard) float64 {
- weightValues := sh.WeightValues()
-
- return float64(weightValues.FreeSpace)
-}
-
-func (e *StorageEngine) sortShardsByWeight(objAddr interface{ EncodeToString() string }) []hashedShard {
+func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string }) []hashedShard {
e.mtx.RLock()
defer e.mtx.RUnlock()
+ h := hrw.StringHash(objAddr.EncodeToString())
shards := make([]hashedShard, 0, len(e.shards))
- weights := make([]float64, 0, len(e.shards))
-
for _, sh := range e.shards {
- shards = append(shards, hashedShard(sh))
- weights = append(weights, e.shardWeight(sh.Shard))
+ shards = append(shards, sh)
}
-
- hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(objAddr.EncodeToString())))
-
+ hrw.SortHasherSliceByValue(shards, h)
return shards
}
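
sortShards now relies on plain rendezvous (HRW) hashing instead of free-space weights. A minimal sketch of the idea, assuming the generic Hasher-slice signature implied by the calls in this diff:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/hrw"
)

type node struct{ name string }

// Hash makes node satisfy the hrw Hasher contract.
func (n node) Hash() uint64 { return hrw.StringHash(n.name) }

func main() {
	nodes := []node{{"shard-a"}, {"shard-b"}, {"shard-c"}}
	key := hrw.StringHash("object-address")

	// Deterministic for a given key: every caller ranks shards identically.
	hrw.SortHasherSliceByValue(nodes, key)
	fmt.Println(nodes)
}
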
@@ -228,32 +274,44 @@ func (e *StorageEngine) unsortedShards() []hashedShard {
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
- shards = append(shards, hashedShard(sh))
+ shards = append(shards, sh)
}
return shards
}
-func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) {
- for i, sh := range e.sortShardsByWeight(addr) {
+func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error {
+ for i, sh := range e.sortShards(addr) {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
if handler(i, sh) {
break
}
}
+ return nil
}
-func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) {
+func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error {
for _, sh := range e.unsortedShards() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
if handler(sh) {
break
}
}
+ return nil
}
// SetShardMode sets the mode of the shard with the provided identifier.
//
// Returns an error if the shard mode could not be set, or if the shard was not found in the storage engine.
-func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
+func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -261,8 +319,9 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte
if id.String() == shID {
if resetErrorCounter {
sh.errorCount.Store(0)
+ e.metrics.ClearErrorCounter(shID)
}
- return sh.SetMode(m)
+ return sh.SetMode(ctx, m)
}
}
@@ -270,17 +329,154 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte
}
// HandleNewEpoch notifies every shard about NewEpoch event.
-func (e *StorageEngine) HandleNewEpoch(epoch uint64) {
- ev := shard.EventNewEpoch(epoch)
-
+func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
e.mtx.RLock()
defer e.mtx.RUnlock()
for _, sh := range e.shards {
- sh.NotificationChannel() <- ev
+ select {
+ case <-ctx.Done():
+ return
+ case sh.NotificationChannel() <- epoch:
+ default:
+ e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
+ zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))
+ }
}
}
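
HandleNewEpoch above deliberately drops (and only logs) an epoch event when a shard is still processing the previous one, so one slow shard cannot stall the engine. The send pattern in isolation, with illustrative names:

func notifyEpoch(ctx context.Context, ch chan<- uint64, epoch uint64) bool {
	select {
	case <-ctx.Done():
		return false
	case ch <- epoch:
		return true
	default:
		return false // receiver busy: skip this epoch rather than block
	}
}
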
+func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error {
+ if len(ids) == 0 {
+ return logicerr.New("ids must be non-empty")
+ }
+
+ deletedShards, err := e.deleteShards(ctx, ids)
+ if err != nil {
+ return err
+ }
+
+ return e.closeShards(ctx, deletedShards)
+}
+
+// closeShards closes the deleted shards, attempting to close every one of them.
+// Returns a single error joining all per-shard errors.
+func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedShard) error {
+ var multiErr error
+ var multiErrGuard sync.Mutex
+ var eg errgroup.Group
+ for _, sh := range deletedShards {
+ eg.Go(func() error {
+ err := sh.SetMode(ctx, mode.Disabled)
+ if err != nil {
+ e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
+ zap.Stringer("id", sh.ID()),
+ zap.Error(err),
+ )
+ multiErrGuard.Lock()
+ multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err))
+ multiErrGuard.Unlock()
+ }
+
+ err = sh.Close(ctx)
+ if err != nil {
+ e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
+ zap.Stringer("id", sh.ID()),
+ zap.Error(err),
+ )
+ multiErrGuard.Lock()
+ multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err))
+ multiErrGuard.Unlock()
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+ return multiErr
+}
+
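
closeShards above uses a collect-don't-abort pattern: every goroutine returns nil so the errgroup never short-circuits, and failures are joined into one error under a mutex. Reduced to its skeleton, with illustrative names and io.Closer standing in for a shard:

package main

import (
	"errors"
	"io"
	"sync"

	"golang.org/x/sync/errgroup"
)

func closeAll(items []io.Closer) error {
	var (
		joined error
		mu     sync.Mutex
		eg     errgroup.Group
	)
	for _, it := range items {
		eg.Go(func() error {
			if err := it.Close(); err != nil {
				mu.Lock()
				joined = errors.Join(joined, err)
				mu.Unlock()
			}
			return nil // keep going; collect instead of aborting
		})
	}
	_ = eg.Wait() // always nil: the goroutines never return an error
	return joined
}
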
+// deleteShards deletes the shards with the specified IDs from the engine's
+// shard list and releases all engine resources associated with them.
+// Returns the deleted shards, or an error if any shard could not be deleted.
+func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) {
+ ss := make([]hashedShard, 0, len(ids))
+
+ e.mtx.Lock()
+ defer e.mtx.Unlock()
+
+ for _, id := range ids {
+ idStr := id.String()
+ sh, found := e.shards[idStr]
+ if !found {
+ return nil, errShardNotFound
+ }
+ ss = append(ss, sh)
+ }
+
+ if len(ss) == len(e.shards) {
+ return nil, logicerr.New("could not delete all the shards")
+ }
+
+ for _, sh := range ss {
+ idStr := sh.ID().String()
+
+ e.metrics.DeleteShardMetrics(idStr)
+
+ delete(e.shards, idStr)
+
+ e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
+ zap.String("id", idStr))
+ }
+
+ return ss, nil
+}
+
func (s hashedShard) Hash() uint64 {
return s.hash
}
+
+func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) {
+ var err error
+ var info []shard.Info
+ prm := shard.ExistsPrm{
+ Address: obj,
+ }
+ var siErr *objectSDK.SplitInfoError
+ var ecErr *objectSDK.ECInfoError
+
+ if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
+ res, exErr := hs.Exists(ctx, prm)
+ if exErr != nil {
+ if client.IsErrObjectAlreadyRemoved(exErr) {
+ err = new(apistatus.ObjectAlreadyRemoved)
+ return true
+ }
+
+ // Check if error is either SplitInfoError or ECInfoError.
+ // True means the object is virtual.
+ if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) {
+ info = append(info, hs.DumpInfo())
+ return false
+ }
+
+ if shard.IsErrObjectExpired(exErr) {
+ err = exErr
+ return true
+ }
+
+ if !client.IsErrObjectNotFound(exErr) {
+ e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address))
+ }
+
+ return false
+ }
+ if res.Exists() {
+ info = append(info, hs.DumpInfo())
+ }
+ return false
+ }); itErr != nil {
+ return nil, itErr
+ }
+ return info, err
+}
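
A hedged usage sketch for DetachShards above; the engine and shard ID are assumed to exist.

func detachOne(ctx context.Context, e *StorageEngine, id *shard.ID) error {
	// DetachShards rejects an empty ID list and refuses to detach every
	// shard, so at least one shard always stays attached.
	return e.DetachShards(ctx, []*shard.ID{id})
}
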
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index 67a006b5ab..3aa9629b05 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -1,42 +1,41 @@
package engine
import (
- "os"
+ "context"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/hrw"
"github.com/stretchr/testify/require"
)
func TestRemoveShard(t *testing.T) {
const numOfShards = 6
- e := testNewEngineWithShardNum(t, numOfShards)
- t.Cleanup(func() {
- e.Close()
- os.RemoveAll(t.Name())
- })
+ te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
+ e, ids := te.engine, te.shardIDs
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
- require.Equal(t, numOfShards, len(e.shardPools))
require.Equal(t, numOfShards, len(e.shards))
removedNum := numOfShards / 2
mSh := make(map[string]bool, numOfShards)
- for i, sh := range e.DumpInfo().Shards {
+ for i, id := range ids {
if i == removedNum {
break
}
- mSh[sh.ID.String()] = true
+ mSh[id.String()] = true
}
for id, remove := range mSh {
if remove {
- e.removeShards(id)
+ e.removeShards(context.Background(), id)
}
}
- require.Equal(t, numOfShards-removedNum, len(e.shardPools))
require.Equal(t, numOfShards-removedNum, len(e.shards))
for id, removed := range mSh {
@@ -44,3 +43,45 @@ func TestRemoveShard(t *testing.T) {
require.True(t, ok != removed)
}
}
+
+func TestDisableShards(t *testing.T) {
+ t.Parallel()
+
+ const numOfShards = 2
+
+ te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
+ e, ids := te.engine, te.shardIDs
+ defer func() { require.NoError(t, e.Close(context.Background())) }()
+
+ require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical))
+ require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical))
+
+ require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]}))
+
+ require.Equal(t, 1, len(e.shards))
+}
+
+func TestSortShardsByWeight(t *testing.T) {
+ t.Parallel()
+
+ const numOfShards = 500
+
+ var shards1 []hashedShard
+ var weights1 []float64
+ var shards2 []hashedShard
+ for i := range numOfShards {
+ shards1 = append(shards1, hashedShard{
+ hash: uint64(i),
+ })
+ weights1 = append(weights1, 0)
+ shards2 = append(shards2, hashedShard{
+ hash: uint64(i),
+ })
+ }
+
+ hrw.SortHasherSliceByWeightValue(shards1, weights1, 0)
+ hrw.SortHasherSliceByValue(shards2, 0)
+
+ require.Equal(t, shards1, shards2)
+}
diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go
index 0d6f49eeab..cfd15b4d4f 100644
--- a/pkg/local_object_storage/engine/tree.go
+++ b/pkg/local_object_storage/engine/tree.go
@@ -1,27 +1,42 @@
package engine
import (
+ "context"
"errors"
+ "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
var _ pilorama.Forest = (*StorageEngine)(nil)
// TreeMove implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
- index, lst, err := e.getTreeShard(d.CID, treeID)
+func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeMove",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, d.CID, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return nil, err
}
- lm, err := lst[index].TreeMove(d, treeID, m)
+ lm, err := lst[index].TreeMove(ctx, d, treeID, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't perform `TreeMove`", err,
+ e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err,
zap.Stringer("cid", d.CID),
zap.String("tree", treeID))
}
@@ -32,16 +47,29 @@ func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pil
}
// TreeAddByPath implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) {
- index, lst, err := e.getTreeShard(d.CID, treeID)
+func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeAddByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Int("meta_count", len(m)),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, d.CID, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return nil, err
}
- lm, err := lst[index].TreeAddByPath(d, treeID, attr, path, m)
+ lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err,
+ e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err,
zap.Stringer("cid", d.CID),
zap.String("tree", treeID))
}
@@ -51,17 +79,53 @@ func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, a
}
// TreeApply implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeApply(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move, backgroundSync bool) error {
- index, lst, err := e.getTreeShard(d.CID, treeID)
+func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApply",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.Bool("background", backgroundSync),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, cnr, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return err
}
- err = lst[index].TreeApply(d, treeID, m, backgroundSync)
+ err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't perform `TreeApply`", err,
- zap.Stringer("cid", d.CID),
+ e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err,
+ zap.Stringer("cid", cnr),
+ zap.String("tree", treeID))
+ }
+ return err
+ }
+ return nil
+}
+
+// TreeApplyBatch implements the pilorama.Forest interface.
+func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, cnr, treeID)
+ if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
+ return err
+ }
+
+ err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m)
+ if err != nil {
+ if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
+ e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err,
+ zap.Stringer("cid", cnr),
zap.String("tree", treeID))
}
return err
@@ -70,17 +134,28 @@ func (e *StorageEngine) TreeApply(d pilorama.CIDDescriptor, treeID string, m *pi
}
// TreeGetByPath implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Bool("latest", latest),
+ ),
+ )
+ defer span.End()
+
var err error
var nodes []pilorama.Node
- for _, sh := range e.sortShardsByWeight(cid) {
- nodes, err = sh.TreeGetByPath(cid, treeID, attr, path, latest)
+ for _, sh := range e.sortShards(cid) {
+ nodes, err = sh.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetByPath`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err,
zap.Stringer("cid", cid),
zap.String("tree", treeID))
}
@@ -92,18 +167,27 @@ func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string,
}
// TreeGetMeta implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetMeta",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
+ ),
+ )
+ defer span.End()
+
var err error
var m pilorama.Meta
var p uint64
- for _, sh := range e.sortShardsByWeight(cid) {
- m, p, err = sh.TreeGetMeta(cid, treeID, nodeID)
+ for _, sh := range e.sortShards(cid) {
+ m, p, err = sh.TreeGetMeta(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetMeta`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err,
zap.Stringer("cid", cid),
zap.String("tree", treeID))
}
@@ -115,17 +199,26 @@ func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID piloram
}
// TreeGetChildren implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
+func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]pilorama.NodeInfo, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetChildren",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
+ ),
+ )
+ defer span.End()
+
var err error
- var nodes []uint64
- for _, sh := range e.sortShardsByWeight(cid) {
- nodes, err = sh.TreeGetChildren(cid, treeID, nodeID)
+ var nodes []pilorama.NodeInfo
+ for _, sh := range e.sortShards(cid) {
+ nodes, err = sh.TreeGetChildren(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetChildren`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err,
zap.Stringer("cid", cid),
zap.String("tree", treeID))
}
@@ -136,18 +229,58 @@ func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pil
return nil, err
}
-// TreeGetOpLog implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+// TreeSortedByFilename implements the pilorama.Forest interface.
+func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
var err error
- var lm pilorama.Move
- for _, sh := range e.sortShardsByWeight(cid) {
- lm, err = sh.TreeGetOpLog(cid, treeID, height)
+ var nodes []pilorama.MultiNodeInfo
+ var cursor *pilorama.Cursor
+ for _, sh := range e.sortShards(cid) {
+ nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't perform `TreeGetOpLog`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err,
+ zap.Stringer("cid", cid),
+ zap.String("tree", treeID))
+ }
+ continue
+ }
+ return nodes, cursor, nil
+ }
+ return nil, last, err
+}
+
+// TreeGetOpLog implements the pilorama.Forest interface.
+func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetOpLog",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", strconv.FormatUint(height, 10)),
+ ),
+ )
+ defer span.End()
+
+ var err error
+ var lm pilorama.Move
+ for _, sh := range e.sortShards(cid) {
+ lm, err = sh.TreeGetOpLog(ctx, cid, treeID, height)
+ if err != nil {
+ if err == shard.ErrPiloramaDisabled {
+ break
+ }
+ if !errors.Is(err, pilorama.ErrTreeNotFound) {
+ e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err,
zap.Stringer("cid", cid),
zap.String("tree", treeID))
}
@@ -159,16 +292,24 @@ func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64
}
// TreeDrop implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeDrop",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
var err error
- for _, sh := range e.sortShardsByWeight(cid) {
- err = sh.TreeDrop(cid, treeID)
+ for _, sh := range e.sortShards(cid) {
+ err = sh.TreeDrop(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) {
- e.reportShardError(sh, "can't perform `TreeDrop`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err,
zap.Stringer("cid", cid),
zap.String("tree", treeID))
}
@@ -180,17 +321,24 @@ func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error {
}
// TreeList implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) {
+func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeList",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ ),
+ )
+ defer span.End()
+
var resIDs []string
for _, sh := range e.unsortedShards() {
- ids, err := sh.TreeList(cid)
+ ids, err := sh.TreeList(ctx, cid)
if err != nil {
if errors.Is(err, shard.ErrPiloramaDisabled) || errors.Is(err, shard.ErrReadOnlyMode) {
return nil, err
}
- e.reportShardError(sh, "can't perform `TreeList`", err,
+ e.reportShardError(ctx, sh, "can't perform `TreeList`", err,
zap.Stringer("cid", cid))
// returns as much info about
@@ -205,24 +353,57 @@ func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) {
}
// TreeExists implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
- _, _, err := e.getTreeShard(cid, treeID)
+func (e *StorageEngine) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeExists",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ _, _, err := e.getTreeShard(ctx, cid, treeID)
if errors.Is(err, pilorama.ErrTreeNotFound) {
return false, nil
}
return err == nil, err
}
+func (e *StorageEngine) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, cid, treeID)
+ if err != nil {
+ return 0, nil
+ }
+ return lst[index].TreeHeight(ctx, cid, treeID)
+}
+
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
- index, lst, err := e.getTreeShard(cid, treeID)
+func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeUpdateLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", strconv.FormatUint(height, 10)),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, cid, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return err
}
- err = lst[index].TreeUpdateLastSyncHeight(cid, treeID, height)
+ err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(lst[index], "can't update tree synchronization height", err,
+ e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err,
zap.Stringer("cid", cid),
zap.String("tree", treeID))
}
@@ -230,17 +411,25 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, h
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
var err error
var height uint64
- for _, sh := range e.sortShardsByWeight(cid) {
- height, err = sh.TreeLastSyncHeight(cid, treeID)
+ for _, sh := range e.sortShards(cid) {
+ height, err = sh.TreeLastSyncHeight(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(sh, "can't read tree synchronization height", err,
+ e.reportShardError(ctx, sh, "can't read tree synchronization height", err,
zap.Stringer("cid", cid),
zap.String("tree", treeID))
}
@@ -251,10 +440,10 @@ func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64
return height, err
}
-func (e *StorageEngine) getTreeShard(cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
- lst := e.sortShardsByWeight(cid)
+func (e *StorageEngine) getTreeShard(ctx context.Context, cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
+ lst := e.sortShards(cid)
for i, sh := range lst {
- exists, err := sh.TreeExists(cid, treeID)
+ exists, err := sh.TreeExists(ctx, cid, treeID)
if err != nil {
return 0, nil, err
}
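
Every tree method above follows the same instrumentation skeleton: open a span with operation-specific attributes, end it via defer, and thread the derived ctx into the shard call so child spans nest. The pattern in isolation; the operation name and attribute set are illustrative.

func (e *StorageEngine) tracedOp(ctx context.Context, cid cidSDK.ID) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.tracedOp",
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
		),
	)
	defer span.End()

	// ...pass ctx down to shard calls here so their spans become children...
	return ctx.Err()
}
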
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 0312e01806..ea0a9e74ea 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -1,12 +1,15 @@
package engine
import (
+ "context"
"strconv"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/stretchr/testify/require"
)
func BenchmarkTreeVsSearch(b *testing.B) {
@@ -22,20 +25,24 @@ func BenchmarkTreeVsSearch(b *testing.B) {
}
func benchmarkTreeVsSearch(b *testing.B, objCount int) {
- e, _, _ := newEngineWithErrorThreshold(b, "", 0)
+ te := newEngineWithErrorThreshold(b, "", 0)
+ defer func() {
+ require.NoError(b, te.ng.Close(context.Background()))
+ }()
+
cid := cidtest.ID()
d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1}
treeID := "someTree"
- for i := 0; i < objCount; i++ {
- obj := generateObjectWithCID(b, cid)
- addAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
- err := Put(e, obj)
+ for i := range objCount {
+ obj := testutil.GenerateObjectWithCID(cid)
+ testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
+ err := Put(context.Background(), te.ng, obj, false)
if err != nil {
b.Fatal(err)
}
- _, err = e.TreeAddByPath(d, treeID, pilorama.AttributeFilename, nil,
- []pilorama.KeyValue{{pilorama.AttributeFilename, []byte(strconv.Itoa(i))}})
+ _, err = te.ng.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, nil,
+ []pilorama.KeyValue{{Key: pilorama.AttributeFilename, Value: []byte(strconv.Itoa(i))}})
if err != nil {
b.Fatal(err)
}
@@ -43,14 +50,14 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
b.Run("search", func(b *testing.B) {
var prm SelectPrm
- prm.WithContainerID(cid)
+ prm.WithContainerID(cid, true)
- var fs object.SearchFilters
- fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), object.MatchStringEqual)
+ var fs objectSDK.SearchFilters
+ fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
prm.WithFilters(fs)
- for i := 0; i < b.N; i++ {
- res, err := e.Select(prm)
+ for range b.N {
+ res, err := te.ng.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}
@@ -60,8 +67,8 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
}
})
b.Run("TreeGetByPath", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- nodes, err := e.TreeGetByPath(cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
+ for range b.N {
+ nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
if err != nil {
b.Fatal(err)
}
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 26600a3eb3..e9ba3410f5 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -1,13 +1,25 @@
package engine
import (
+ "context"
+ "sync"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "golang.org/x/sync/errgroup"
)
// FlushWriteCachePrm groups the parameters of FlushWriteCache operation.
type FlushWriteCachePrm struct {
shardID *shard.ID
ignoreErrors bool
+ seal bool
}
// SetShardID is an option to set shard ID.
@@ -17,16 +29,29 @@ func (p *FlushWriteCachePrm) SetShardID(id *shard.ID) {
p.shardID = id
}
-// SetIgnoreErrors sets errors ignore flag..
+// SetIgnoreErrors sets the ignore-errors flag.
func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
p.ignoreErrors = ignore
}
+// SetSeal sets the seal flag.
+func (p *FlushWriteCachePrm) SetSeal(v bool) {
+ p.seal = v
+}
+
// FlushWriteCacheRes groups the resulting values of FlushWriteCache operation.
type FlushWriteCacheRes struct{}
// FlushWriteCache flushes write-cache on a single shard.
-func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
+func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.FlushWriteCache",
+ trace.WithAttributes(
+ attribute.String("shard_id", p.shardID.String()),
+ attribute.Bool("ignore_errors", p.ignoreErrors),
+ attribute.Bool("seal", p.seal),
+ ))
+ defer span.End()
+
e.mtx.RLock()
sh, ok := e.shards[p.shardID.String()]
e.mtx.RUnlock()
@@ -37,6 +62,133 @@ func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRe
var prm shard.FlushWriteCachePrm
prm.SetIgnoreErrors(p.ignoreErrors)
+ prm.SetSeal(p.seal)
- return FlushWriteCacheRes{}, sh.FlushWriteCache(prm)
+ return FlushWriteCacheRes{}, sh.FlushWriteCache(ctx, prm)
+}
+
+type SealWriteCachePrm struct {
+ ShardIDs []*shard.ID
+ IgnoreErrors bool
+ Async bool
+ RestoreMode bool
+ Shrink bool
+}
+
+type ShardSealResult struct {
+ ShardID *shard.ID
+ Success bool
+ ErrorMsg string
+}
+
+type SealWriteCacheRes struct {
+ ShardResults []ShardSealResult
+}
+
+// SealWriteCache flushes all data to the blobstore and moves the write-cache to degraded read-only mode.
+func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePrm) (SealWriteCacheRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.SealWriteCache",
+ trace.WithAttributes(
+ attribute.Int("shard_id_count", len(prm.ShardIDs)),
+ attribute.Bool("ignore_errors", prm.IgnoreErrors),
+ attribute.Bool("restore_mode", prm.RestoreMode),
+ ))
+ defer span.End()
+
+ res := SealWriteCacheRes{
+ ShardResults: make([]ShardSealResult, 0, len(prm.ShardIDs)),
+ }
+ resGuard := &sync.Mutex{}
+
+ eg, egCtx := errgroup.WithContext(ctx)
+ for _, shardID := range prm.ShardIDs {
+ eg.Go(func() error {
+ e.mtx.RLock()
+ sh, ok := e.shards[shardID.String()]
+ e.mtx.RUnlock()
+
+ if !ok {
+ resGuard.Lock()
+ defer resGuard.Unlock()
+ res.ShardResults = append(res.ShardResults, ShardSealResult{
+ ShardID: shardID,
+ ErrorMsg: errShardNotFound.Error(),
+ })
+ return nil
+ }
+
+ err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, Async: prm.Async, RestoreMode: prm.RestoreMode, Shrink: prm.Shrink})
+
+ resGuard.Lock()
+ defer resGuard.Unlock()
+
+ if err != nil {
+ res.ShardResults = append(res.ShardResults, ShardSealResult{
+ ShardID: shardID,
+ ErrorMsg: err.Error(),
+ })
+ } else {
+ res.ShardResults = append(res.ShardResults, ShardSealResult{
+ ShardID: shardID,
+ Success: true,
+ })
+ }
+ return nil
+ })
+ }
+
+ if err := eg.Wait(); err != nil {
+ return SealWriteCacheRes{}, err
+ }
+ return res, nil
+}
+
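
A hedged usage sketch for SealWriteCache above: per-shard failures come back as data in ShardResults rather than aborting the whole call. The engine and IDs are assumed to exist, and fmt is assumed imported.

func sealAll(ctx context.Context, e *StorageEngine, ids []*shard.ID) error {
	res, err := e.SealWriteCache(ctx, SealWriteCachePrm{ShardIDs: ids})
	if err != nil {
		return err
	}
	for _, r := range res.ShardResults {
		if !r.Success {
			fmt.Printf("shard %s: seal failed: %s\n", r.ShardID, r.ErrorMsg)
		}
	}
	return nil
}
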
+type writeCacheMetrics struct {
+ shardID string
+ metrics metrics.WriteCacheMetrics
+ path string
+}
+
+func (m *writeCacheMetrics) SetPath(path string) {
+ m.path = path
+}
+
+func (m *writeCacheMetrics) SetShardID(id string) {
+ m.shardID = id
+}
+
+func (m *writeCacheMetrics) Get(d time.Duration, success bool, st writecache.StorageType) {
+ m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Get", success, d)
+}
+
+func (m *writeCacheMetrics) Delete(d time.Duration, success bool, st writecache.StorageType) {
+ m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Delete", success, d)
+}
+
+func (m *writeCacheMetrics) Put(d time.Duration, success bool, st writecache.StorageType) {
+ m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Put", success, d)
+}
+
+func (m *writeCacheMetrics) SetEstimateSize(size uint64) {
+ m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), size)
+}
+
+func (m *writeCacheMetrics) SetMode(mod mode.ComponentMode) {
+ m.metrics.SetMode(m.shardID, mod.String())
+}
+
+func (m *writeCacheMetrics) SetActualCounters(count uint64) {
+ m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), count)
+}
+
+func (m *writeCacheMetrics) Flush(success bool, st writecache.StorageType) {
+ m.metrics.IncOperationCounter(m.shardID, m.path, st.String(), "Flush", metrics.NullBool{Bool: success, Valid: true})
+}
+
+func (m *writeCacheMetrics) Evict(st writecache.StorageType) {
+ m.metrics.IncOperationCounter(m.shardID, m.path, st.String(), "Evict", metrics.NullBool{})
+}
+
+func (m *writeCacheMetrics) Close() {
+ m.metrics.Close(m.shardID, m.path)
}
diff --git a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go
index 82024ffa7f..6b101fa605 100644
--- a/pkg/local_object_storage/internal/log/log.go
+++ b/pkg/local_object_storage/internal/log/log.go
@@ -1,16 +1,16 @@
package storagelog
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
-// headMsg is a distinctive part of all messages.
-const headMsg = "local object storage operation"
-
// Write writes message about storage engine's operation to logger.
-func Write(logger *logger.Logger, fields ...zap.Field) {
- logger.Info(headMsg, fields...)
+func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) {
+ logger.Debug(ctx, logs.StorageOperation, fields...)
}
// AddressField returns logger's field for object address.
diff --git a/pkg/local_object_storage/internal/metaerr/error.go b/pkg/local_object_storage/internal/metaerr/error.go
new file mode 100644
index 0000000000..41b8504bc3
--- /dev/null
+++ b/pkg/local_object_storage/internal/metaerr/error.go
@@ -0,0 +1,33 @@
+package metaerr
+
+import "errors"
+
+// Error is a wrapper for SSD-related errors.
+// In our model it unites metabase, pilorama and write-cache errors.
+type Error struct {
+ err error
+}
+
+// New returns a simple error with the provided message.
+func New(msg string) Error {
+ return Error{err: errors.New(msg)}
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+ return e.err.Error()
+}
+
+// Wrap wraps an arbitrary error.
+// Returns nil if err == nil.
+func Wrap(err error) error {
+ if err != nil {
+ return Error{err: err}
+ }
+ return nil
+}
+
+// Unwrap returns the underlying error.
+func (e Error) Unwrap() error {
+ return e.err
+}
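
A hedged, in-package sketch of how metaerr is meant to be consumed: wrap disk-layer errors at the boundary, then test for the marker type to tell SSD (metabase/pilorama/write-cache) failures apart from other errors.

// isMetaError reports whether err originated in the meta layer.
func isMetaError(err error) bool {
	var me Error
	return errors.As(err, &me)
}

func example() {
	// Wrap preserves the chain, so errors.Is/As still see the cause.
	cause := errors.New("database not open")
	err := Wrap(cause)

	_ = isMetaError(err)      // true
	_ = errors.Is(err, cause) // true
}
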
diff --git a/pkg/local_object_storage/internal/metaerr/error_test.go b/pkg/local_object_storage/internal/metaerr/error_test.go
new file mode 100644
index 0000000000..acde48793c
--- /dev/null
+++ b/pkg/local_object_storage/internal/metaerr/error_test.go
@@ -0,0 +1,68 @@
+package metaerr
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestError(t *testing.T) {
+ t.Run("errors.Is", func(t *testing.T) {
+ e1 := errors.New("some error")
+ ee := Wrap(e1)
+ require.ErrorIs(t, ee, e1)
+
+ e2 := fmt.Errorf("wrap: %w", e1)
+ ee = Wrap(e2)
+ require.ErrorIs(t, ee, e1)
+ require.ErrorIs(t, ee, e2)
+
+ require.Equal(t, errors.Unwrap(ee), e2)
+ })
+
+ t.Run("errors.As", func(t *testing.T) {
+ e1 := testError{42}
+ ee := Wrap(e1)
+
+ {
+ var actual testError
+ require.ErrorAs(t, ee, &actual)
+ require.Equal(t, e1.data, actual.data)
+ }
+ {
+ var actual Error
+ require.ErrorAs(t, ee, &actual)
+ require.Equal(t, e1, actual.err)
+ }
+
+ e2 := fmt.Errorf("wrap: %w", e1)
+ ee = Wrap(e2)
+
+ {
+ var actual testError
+ require.ErrorAs(t, ee, &actual)
+ require.Equal(t, e1.data, actual.data)
+ }
+ })
+}
+
+func TestNilWrap(t *testing.T) {
+ require.NoError(t, Wrap(nil))
+}
+
+func TestErrorMessage(t *testing.T) {
+ msg := "sth to report"
+ err := New(msg)
+ require.Contains(t, err.Error(), msg)
+}
+
+type testError struct {
+ data uint64
+}
+
+func (e testError) Error() string {
+ return strconv.FormatUint(e.data, 10)
+}
diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go
index 74400a9811..d46365296b 100644
--- a/pkg/local_object_storage/internal/storagetest/storage.go
+++ b/pkg/local_object_storage/internal/storagetest/storage.go
@@ -1,6 +1,7 @@
package storagetest
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -9,10 +10,10 @@ import (
// Component represents single storage component.
type Component interface {
- Open(bool) error
- SetMode(mode.Mode) error
- Init() error
- Close() error
+ Open(context.Context, mode.Mode) error
+ SetMode(context.Context, mode.Mode) error
+ Init(context.Context) error
+ Close(context.Context) error
}
// Constructor constructs storage component.
@@ -57,19 +58,19 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) {
t.Run("RW", func(t *testing.T) {
// Use-case: irrecoverable error on some components, close everything.
s := cons(t)
- require.NoError(t, s.Open(false))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, s.Close(context.Background()))
})
t.Run("RO", func(t *testing.T) {
// Use-case: irrecoverable error on some components, close everything.
// Open in read-only must be done after the db is here.
s := cons(t)
- require.NoError(t, s.Open(false))
- require.NoError(t, s.Init())
- require.NoError(t, s.Close())
+ require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.Close(context.Background()))
- require.NoError(t, s.Open(true))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
+ require.NoError(t, s.Close(context.Background()))
})
}
@@ -77,10 +78,10 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) {
func TestCloseTwice(t *testing.T, cons Constructor) {
// Use-case: move to maintenance mode twice, first time failed.
s := cons(t)
- require.NoError(t, s.Open(false))
- require.NoError(t, s.Init())
- require.NoError(t, s.Close())
- require.NoError(t, s.Close()) // already closed, no-op
+ require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close(context.Background())) // already closed, no-op
}
// TestSetMode checks that any mode transition can be done safely.
@@ -89,33 +90,33 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) {
// Use-case: metabase `Init` failed,
// call `SetMode` on all not-yet-initialized components.
s := cons(t)
- require.NoError(t, s.Open(false))
- require.NoError(t, s.SetMode(m))
+ require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, s.SetMode(context.Background(), m))
t.Run("after open in RO", func(t *testing.T) {
- require.NoError(t, s.Close())
- require.NoError(t, s.Open(true))
- require.NoError(t, s.SetMode(m))
+ require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
+ require.NoError(t, s.SetMode(context.Background(), m))
})
- require.NoError(t, s.Close())
+ require.NoError(t, s.Close(context.Background()))
})
t.Run("after init", func(t *testing.T) {
s := cons(t)
// Use-case: normal node operation.
- require.NoError(t, s.Open(false))
- require.NoError(t, s.Init())
- require.NoError(t, s.SetMode(m))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.SetMode(context.Background(), m))
+ require.NoError(t, s.Close(context.Background()))
})
}
func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) {
// Use-case: normal node operation.
s := cons(t)
- require.NoError(t, s.Open(false))
- require.NoError(t, s.Init())
- require.NoError(t, s.SetMode(from))
- require.NoError(t, s.SetMode(to))
- require.NoError(t, s.Close())
+ require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, s.Init(context.Background()))
+ require.NoError(t, s.SetMode(context.Background(), from))
+ require.NoError(t, s.SetMode(context.Background(), to))
+ require.NoError(t, s.Close(context.Background()))
}
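
A hedged sketch of a trivial in-memory Component satisfying the updated interface, assuming Constructor is func(*testing.T) Component as the harness above suggests; purely illustrative.

type memComponent struct{ m mode.Mode }

func (c *memComponent) Open(_ context.Context, m mode.Mode) error    { c.m = m; return nil }
func (c *memComponent) Init(context.Context) error                   { return nil }
func (c *memComponent) SetMode(_ context.Context, m mode.Mode) error { c.m = m; return nil }
func (c *memComponent) Close(context.Context) error                  { return nil }

func TestMemComponent(t *testing.T) {
	TestCloseTwice(t, func(t *testing.T) Component { return &memComponent{} })
}
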
diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go
new file mode 100644
index 0000000000..52b199b0b5
--- /dev/null
+++ b/pkg/local_object_storage/internal/testutil/generators.go
@@ -0,0 +1,114 @@
+package testutil
+
+import (
+ cryptorand "crypto/rand"
+ "encoding/binary"
+ "math/rand"
+ "sync/atomic"
+ "testing"
+
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
+// AddressGenerator is the interface of types that generate object addresses.
+type AddressGenerator interface {
+ Next() oid.Address
+}
+
+// SeqAddrGenerator is an AddressGenerator that generates addresses sequentially and wraps around the given max ID.
+type SeqAddrGenerator struct {
+ cnt atomic.Uint64
+ MaxID uint64
+}
+
+var _ AddressGenerator = &SeqAddrGenerator{}
+
+func (g *SeqAddrGenerator) Next() oid.Address {
+ var id oid.ID
+ binary.LittleEndian.PutUint64(id[:], ((g.cnt.Add(1)-1)%g.MaxID)+1)
+ var addr oid.Address
+ addr.SetContainer(cid.ID{})
+ addr.SetObject(id)
+ return addr
+}
+
+// RandAddrGenerator is an AddressGenerator that generates random addresses in the given range.
+type RandAddrGenerator uint64
+
+func (g RandAddrGenerator) Next() oid.Address {
+ var id oid.ID
+ binary.LittleEndian.PutUint64(id[:], uint64(1+int(rand.Int63n(int64(g)))))
+ var addr oid.Address
+ addr.SetContainer(cid.ID{})
+ addr.SetObject(id)
+ return addr
+}
+
+// ObjectGenerator is the interface of types that generate object entries.
+type ObjectGenerator interface {
+ Next() *objectSDK.Object
+}
+
+// SeqObjGenerator is an ObjectGenerator that generates entries with random payloads of size ObjSize and sequential IDs.
+type SeqObjGenerator struct {
+ cnt atomic.Uint64
+ ObjSize uint64
+}
+
+var _ ObjectGenerator = &SeqObjGenerator{}
+
+func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object {
+ data := make([]byte, sz)
+ _, _ = cryptorand.Read(data)
+ obj := GenerateObjectWithCIDWithPayload(cid, data)
+ obj.SetID(oid)
+ return obj
+}
+
+func (g *SeqObjGenerator) Next() *objectSDK.Object {
+ var id oid.ID
+ binary.LittleEndian.PutUint64(id[:], g.cnt.Add(1))
+ return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
+}
+
+// RandObjGenerator is an ObjectGenerator that generates entries with random IDs and payloads of size ObjSize.
+type RandObjGenerator struct {
+ ObjSize uint64
+}
+
+var _ ObjectGenerator = &RandObjGenerator{}
+
+func (g *RandObjGenerator) Next() *objectSDK.Object {
+ var id oid.ID
+ _, _ = cryptorand.Read(id[:])
+ return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
+}
+
+// OverwriteObjGenerator is an ObjectGenerator that generates entries with random payloads of size ObjSize and at most MaxObjects distinct IDs.
+type OverwriteObjGenerator struct {
+ ObjSize uint64
+ MaxObjects uint64
+}
+
+func (g *OverwriteObjGenerator) Next() *objectSDK.Object {
+ var id oid.ID
+ binary.LittleEndian.PutUint64(id[:], uint64(1+rand.Int63n(int64(g.MaxObjects))))
+ return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
+}
+
+func AddressFromObject(t testing.TB, obj *objectSDK.Object) oid.Address {
+ var addr oid.Address
+
+ id, isSet := obj.ID()
+ require.True(t, isSet, "object ID is not set")
+ addr.SetObject(id)
+
+ cid, isSet := obj.ContainerID()
+ require.True(t, isSet, "container ID is not set")
+ addr.SetContainer(cid)
+
+ return addr
+}
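Editor's note: these generators target benchmarks and load tests — the sequential generators produce deterministic, sorted keys, while the random and overwrite generators model random access and in-place updates. A hypothetical benchmark shape; `putObject` is an assumed storage-specific helper, not part of this package:

```go
// Hypothetical benchmark using SeqObjGenerator; putObject is an assumed
// helper and not part of this package.
func BenchmarkPutSequential(b *testing.B) {
	gen := &SeqObjGenerator{ObjSize: 8 << 10} // 8 KiB payloads, IDs 1,2,3,...
	for i := 0; i < b.N; i++ {
		obj := gen.Next()
		addr := AddressFromObject(b, obj)
		_ = addr // putObject(storage, addr, obj)
	}
}
```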
diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go
new file mode 100644
index 0000000000..cc6f726a40
--- /dev/null
+++ b/pkg/local_object_storage/internal/testutil/generators_test.go
@@ -0,0 +1,70 @@
+package testutil
+
+import (
+ "encoding/binary"
+ "slices"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestOverwriteObjGenerator(t *testing.T) {
+ gen := &OverwriteObjGenerator{
+ ObjSize: 10,
+ MaxObjects: 4,
+ }
+ for range 40 {
+ obj := gen.Next()
+ id, isSet := obj.ID()
+ i := binary.LittleEndian.Uint64(id[:])
+
+ require.True(t, isSet)
+ require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
+ require.True(t, 1 <= i && i <= gen.MaxObjects)
+ }
+}
+
+func TestRandObjGenerator(t *testing.T) {
+ gen := &RandObjGenerator{ObjSize: 10}
+ for range 10 {
+ obj := gen.Next()
+
+ require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
+ }
+}
+
+func TestSeqObjGenerator(t *testing.T) {
+ gen := &SeqObjGenerator{ObjSize: 10}
+ var addrs []string
+ for i := 1; i <= 10; i++ {
+ obj := gen.Next()
+ id, isSet := obj.ID()
+ addrs = append(addrs, AddressFromObject(t, obj).EncodeToString())
+
+ require.True(t, isSet)
+ require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
+ require.Equal(t, uint64(i), binary.LittleEndian.Uint64(id[:]))
+ }
+ require.True(t, slices.IsSorted(addrs))
+}
+
+func TestRandAddrGenerator(t *testing.T) {
+ gen := RandAddrGenerator(5)
+ for range 50 {
+ addr := gen.Next()
+ id := addr.Object()
+ k := binary.LittleEndian.Uint64(id[:])
+
+ require.True(t, 1 <= k && k <= uint64(gen))
+ }
+}
+
+func TestSeqAddrGenerator(t *testing.T) {
+ gen := &SeqAddrGenerator{MaxID: 10}
+ for i := 1; i <= 20; i++ {
+ addr := gen.Next()
+ id := addr.Object()
+
+ require.Equal(t, uint64((i-1)%int(gen.MaxID)+1), binary.LittleEndian.Uint64(id[:]))
+ }
+}
diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go
new file mode 100644
index 0000000000..1087e40be5
--- /dev/null
+++ b/pkg/local_object_storage/internal/testutil/object.go
@@ -0,0 +1,75 @@
+package testutil
+
+import (
+ "crypto/rand"
+ "crypto/sha256"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+)
+
+const defaultDataSize = 32
+
+func GenerateObject() *objectSDK.Object {
+ return GenerateObjectWithCID(cidtest.ID())
+}
+
+func GenerateObjectWithSize(sz int) *objectSDK.Object {
+ data := make([]byte, sz)
+ _, _ = rand.Read(data)
+ return GenerateObjectWithCIDWithPayload(cidtest.ID(), data)
+}
+
+func GenerateObjectWithCID(cnr cid.ID) *objectSDK.Object {
+ data := make([]byte, defaultDataSize)
+ _, _ = rand.Read(data)
+ return GenerateObjectWithCIDWithPayload(cnr, data)
+}
+
+func GenerateObjectWithCIDWithPayload(cnr cid.ID, data []byte) *objectSDK.Object {
+ var ver version.Version
+ ver.SetMajor(2)
+ ver.SetMinor(1)
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(data))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cnr)
+ obj.SetVersion(&ver)
+ obj.SetPayload(data)
+ obj.SetPayloadSize(uint64(len(data)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ return obj
+}
+
+func AddAttribute(obj *objectSDK.Object, key, val string) {
+ var attr objectSDK.Attribute
+ attr.SetKey(key)
+ attr.SetValue(val)
+
+ attrs := obj.Attributes()
+ attrs = append(attrs, attr)
+ obj.SetAttributes(attrs...)
+}
+
+func AddPayload(obj *objectSDK.Object, size int) {
+ buf := make([]byte, size)
+ _, _ = rand.Read(buf)
+
+ obj.SetPayload(buf)
+ obj.SetPayloadSize(uint64(size))
+}
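Editor's note: a short usage sketch combining the helpers above (the attribute key and sizes are arbitrary, and the usual `cidtest`/`testutil` imports are assumed):

```go
// Build a test object: fixed container, one attribute, 64-byte payload.
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(obj, "FileName", "sample.txt")
testutil.AddPayload(obj, 64) // replaces the payload but not the stored checksums
```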
diff --git a/pkg/local_object_storage/metabase/VERSION.md b/pkg/local_object_storage/metabase/VERSION.md
index 97e514db1f..9cfc95332f 100644
--- a/pkg/local_object_storage/metabase/VERSION.md
+++ b/pkg/local_object_storage/metabase/VERSION.md
@@ -2,6 +2,8 @@
This file describes changes between the metabase versions.
+Warning: the database schema below is outdated and incomplete; see the source code.
+
## Current
### Primary buckets
@@ -86,6 +88,11 @@ This file describes changes between the metabase versions.
# History
+## Version 3
+
+- Payload hash, owner ID and FKBT buckets deleted
+- Expiration epoch to object ID and object ID to expiration epoch buckets added
+
## Version 2
- Container ID is encoded as 32-byte slice
diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go
new file mode 100644
index 0000000000..de1479e6fa
--- /dev/null
+++ b/pkg/local_object_storage/metabase/bucket_cache.go
@@ -0,0 +1,82 @@
+package meta
+
+import (
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "go.etcd.io/bbolt"
+)
+
+type bucketCache struct {
+ locked *bbolt.Bucket
+ graveyard *bbolt.Bucket
+ garbage *bbolt.Bucket
+ expired map[cid.ID]*bbolt.Bucket
+ primary map[cid.ID]*bbolt.Bucket
+}
+
+func newBucketCache() *bucketCache {
+ return &bucketCache{}
+}
+
+func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
+ if bc == nil {
+ return tx.Bucket(bucketNameLocked)
+ }
+ return getBucket(&bc.locked, tx, bucketNameLocked)
+}
+
+func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
+ if bc == nil {
+ return tx.Bucket(graveyardBucketName)
+ }
+ return getBucket(&bc.graveyard, tx, graveyardBucketName)
+}
+
+func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
+ if bc == nil {
+ return tx.Bucket(garbageBucketName)
+ }
+ return getBucket(&bc.garbage, tx, garbageBucketName)
+}
+
+func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket {
+ if *cache != nil {
+ return *cache
+ }
+
+ *cache = tx.Bucket(name)
+ return *cache
+}
+
+func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
+ if bc == nil {
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = objectToExpirationEpochBucketName(cnr, bucketName)
+ return tx.Bucket(bucketName)
+ }
+ return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr)
+}
+
+func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
+ if bc == nil {
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = primaryBucketName(cnr, bucketName)
+ return tx.Bucket(bucketName)
+ }
+ return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr)
+}
+
+func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
+ value, ok := (*m)[cnr]
+ if ok {
+ return value
+ }
+
+ if *m == nil {
+ *m = make(map[cid.ID]*bbolt.Bucket, 1)
+ }
+
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = nameFunc(cnr, bucketName)
+ (*m)[cnr] = getBucket(&value, tx, bucketName)
+ return value
+}
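Editor's note: the cache memoizes `*bbolt.Bucket` handles for the lifetime of a single transaction, so hot paths that touch the same buckets for many addresses skip repeated name lookups; passing a nil cache falls back to plain `tx.Bucket` calls. A sketch of the intended pattern (illustrative call site, not taken from this change):

```go
// One cache per transaction: bucket handles are only valid inside their tx.
err := db.boltDB.View(func(tx *bbolt.Tx) error {
	bc := newBucketCache()
	for _, addr := range addrs { // addrs: an assumed []oid.Address batch
		graveyard := getGraveyardBucket(bc, tx) // looked up once, then reused
		expired := getExpiredBucket(bc, tx, addr.Container())
		_, _ = graveyard, expired
	}
	return nil
})
```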
diff --git a/pkg/local_object_storage/metabase/children.go b/pkg/local_object_storage/metabase/children.go
new file mode 100644
index 0000000000..acd3679517
--- /dev/null
+++ b/pkg/local_object_storage/metabase/children.go
@@ -0,0 +1,77 @@
+package meta
+
+import (
+ "context"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// GetChildren returns a parent -> children map.
+// If an object has no children, the map contains an addr -> empty slice entry.
+func (db *DB) GetChildren(ctx context.Context, addresses []oid.Address) (map[oid.Address][]oid.Address, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("GetChildren", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.GetChildren",
+ trace.WithAttributes(
+ attribute.Int("addr_count", len(addresses)),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ result := make(map[oid.Address][]oid.Address, len(addresses))
+
+ buffer := make([]byte, bucketKeySize)
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ for _, addr := range addresses {
+ if _, found := result[addr]; found {
+ continue
+ }
+
+ result[addr] = []oid.Address{}
+ bkt := tx.Bucket(parentBucketName(addr.Container(), buffer))
+ if bkt == nil {
+ continue
+ }
+
+ binObjIDs, err := decodeList(bkt.Get(objectKey(addr.Object(), buffer)))
+ if err != nil {
+ return err
+ }
+
+ for _, binObjID := range binObjIDs {
+ var id oid.ID
+ if err = id.Decode(binObjID); err != nil {
+ return err
+ }
+ var resultAddress oid.Address
+ resultAddress.SetContainer(addr.Container())
+ resultAddress.SetObject(id)
+ result[addr] = append(result[addr], resultAddress)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, metaerr.Wrap(err)
+ }
+ success = true
+ return result, nil
+}
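Editor's note: an assumed call site for the new method. The returned map has one entry per requested address, so callers can range over results without nil checks:

```go
// Resolve children for a batch of parent addresses (parentAddrs is assumed).
children, err := db.GetChildren(ctx, parentAddrs)
if err != nil {
	return err
}
for parent, kids := range children {
	_ = parent
	_ = kids // empty slice when the parent has no children
}
```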
diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go
index 3d69649a93..da27e6085a 100644
--- a/pkg/local_object_storage/metabase/containers.go
+++ b/pkg/local_object_storage/metabase/containers.go
@@ -1,13 +1,28 @@
package meta
import (
+ "context"
"encoding/binary"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.etcd.io/bbolt"
)
-func (db *DB) Containers() (list []cid.ID, err error) {
+func (db *DB) Containers(ctx context.Context) (list []cid.ID, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Containers", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Containers")
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -20,8 +35,8 @@ func (db *DB) Containers() (list []cid.ID, err error) {
return err
})
-
- return list, err
+ success = err == nil
+ return list, metaerr.Wrap(err)
}
func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) {
@@ -41,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) {
return result, err
}
-func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) {
+func (db *DB) ContainerSize(id cid.ID) (uint64, error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -49,21 +64,22 @@ func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) {
return 0, ErrDegradedMode
}
- err = db.boltDB.View(func(tx *bbolt.Tx) error {
- size, err = db.containerSize(tx, id)
+ var size uint64
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ size = db.containerSize(tx, id)
- return err
+ return nil
})
- return size, err
+ return size, metaerr.Wrap(err)
}
-func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) {
+func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 {
containerVolume := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
id.Encode(key)
- return parseContainerSize(containerVolume.Get(key)), nil
+ return parseContainerSize(containerVolume.Get(key))
}
func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool {
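Editor's note: `Containers` now takes a context (for tracing and method metrics) and wraps errors with `metaerr`, while `ContainerSize` drops its inner error because the size lookup itself cannot fail. A sketch of the updated call sites:

```go
// Enumerate containers and their accounted sizes with the new signatures.
list, err := db.Containers(ctx) // now traced and counted in method metrics
if err != nil {
	return err
}
for _, cnr := range list {
	size, err := db.ContainerSize(cnr) // only mode/View errors remain
	if err != nil {
		return err
	}
	_ = size
}
```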
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 0e2aacabcb..8d8d91dc74 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -1,11 +1,12 @@
package meta_test
import (
+ "context"
"math/rand"
- "sort"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -14,14 +15,17 @@ import (
)
func TestDB_Containers(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const N = 10
cids := make(map[string]int, N)
- for i := 0; i < N; i++ {
- obj := generateObject(t)
+ for range N {
+ obj := testutil.GenerateObject()
cnr, _ := obj.ContainerID()
@@ -31,7 +35,7 @@ func TestDB_Containers(t *testing.T) {
require.NoError(t, err)
}
- lst, err := db.Containers()
+ lst, err := db.Containers(context.Background())
require.NoError(t, err)
for _, cnr := range lst {
@@ -53,43 +57,29 @@ func TestDB_Containers(t *testing.T) {
}
t.Run("Inhume", func(t *testing.T) {
- obj := generateObject(t)
+ obj := testutil.GenerateObject()
require.NoError(t, putBig(db, obj))
- cnrs, err := db.Containers()
+ cnrs, err := db.Containers(context.Background())
require.NoError(t, err)
cnr, _ := obj.ContainerID()
assertContains(cnrs, cnr)
- require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address()))
+ require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID()))
- cnrs, err = db.Containers()
- require.NoError(t, err)
- assertContains(cnrs, cnr)
- })
-
- t.Run("ToMoveIt", func(t *testing.T) {
- obj := generateObject(t)
-
- require.NoError(t, putBig(db, obj))
-
- cnrs, err := db.Containers()
- require.NoError(t, err)
- cnr, _ := obj.ContainerID()
- assertContains(cnrs, cnr)
-
- require.NoError(t, metaToMoveIt(db, object.AddressOf(obj)))
-
- cnrs, err = db.Containers()
+ cnrs, err = db.Containers(context.Background())
require.NoError(t, err)
assertContains(cnrs, cnr)
})
}
func TestDB_ContainersCount(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const R, T, SG, L = 10, 11, 12, 13 // number of objects per type
@@ -99,15 +89,14 @@ func TestDB_ContainersCount(t *testing.T) {
}{
{R, objectSDK.TypeRegular},
{T, objectSDK.TypeTombstone},
- {SG, objectSDK.TypeStorageGroup},
{L, objectSDK.TypeLock},
}
expected := make([]cid.ID, 0, R+T+SG+L)
for _, upload := range uploadObjects {
- for i := 0; i < upload.amount; i++ {
- obj := generateObject(t)
+ for range upload.amount {
+ obj := testutil.GenerateObject()
obj.SetType(upload.typ)
err := putBig(db, obj)
@@ -118,22 +107,16 @@ func TestDB_ContainersCount(t *testing.T) {
}
}
- sort.Slice(expected, func(i, j int) bool {
- return expected[i].EncodeToString() < expected[j].EncodeToString()
- })
-
- got, err := db.Containers()
+ got, err := db.Containers(context.Background())
require.NoError(t, err)
-
- sort.Slice(got, func(i, j int) bool {
- return got[i].EncodeToString() < got[j].EncodeToString()
- })
-
- require.Equal(t, expected, got)
+ require.ElementsMatch(t, expected, got)
}
func TestDB_ContainerSize(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const (
C = 3
@@ -143,17 +126,17 @@ func TestDB_ContainerSize(t *testing.T) {
cids := make(map[cid.ID]int, C)
objs := make(map[cid.ID][]*objectSDK.Object, C*N)
- for i := 0; i < C; i++ {
+ for range C {
cnr := cidtest.ID()
cids[cnr] = 0
- for j := 0; j < N; j++ {
+ for range N {
size := rand.Intn(1024)
- parent := generateObjectWithCID(t, cnr)
+ parent := testutil.GenerateObjectWithCID(cnr)
parent.SetPayloadSize(uint64(size / 2))
- obj := generateObjectWithCID(t, cnr)
+ obj := testutil.GenerateObjectWithCID(cnr)
obj.SetPayloadSize(uint64(size))
idParent, _ := parent.ID()
obj.SetParentID(idParent)
@@ -181,7 +164,7 @@ func TestDB_ContainerSize(t *testing.T) {
require.NoError(t, metaInhume(
db,
object.AddressOf(obj),
- oidtest.Address(),
+ oidtest.ID(),
))
volume -= int(obj.PayloadSize())
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index 1a19c3e2a3..c19c65224a 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -1,10 +1,13 @@
package meta
import (
+ "context"
"errors"
"fmt"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -18,37 +21,66 @@ var ErrDegradedMode = logicerr.New("metabase is in a degraded mode")
// ErrReadOnlyMode is returned when metabase is in a read-only mode.
var ErrReadOnlyMode = logicerr.New("metabase is in a read-only mode")
-// Open boltDB instance for metabase.
-func (db *DB) Open(readOnly bool) error {
- err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
- if err != nil {
- return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
+var (
+ mStaticBuckets = map[string]struct{}{
+ string(containerVolumeBucketName): {},
+ string(containerCounterBucketName): {},
+ string(graveyardBucketName): {},
+ string(garbageBucketName): {},
+ string(shardInfoBucket): {},
+ string(bucketNameLocked): {},
+ string(expEpochToObjectBucketName): {},
}
- db.log.Debug("created directory for Metabase", zap.String("path", db.info.Path))
+ // deprecatedBuckets lists buckets that are no longer used.
+ deprecatedBuckets = [][]byte{
+ toMoveItBucketName,
+ }
+)
+
+// Open boltDB instance for metabase.
+func (db *DB) Open(ctx context.Context, m mode.Mode) error {
+ db.modeMtx.Lock()
+ defer db.modeMtx.Unlock()
+ db.mode = m
+ db.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
+
+ if m.NoMetabase() {
+ return nil
+ }
+ return db.openDB(ctx, m)
+}
+
+func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
+ err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
+ if err != nil {
+ return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err)
+ }
+
+ db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
if db.boltOptions == nil {
opts := *bbolt.DefaultOptions
db.boltOptions = &opts
}
- db.boltOptions.ReadOnly = readOnly
+ db.boltOptions.ReadOnly = mode.ReadOnly()
- return db.openBolt()
+ return metaerr.Wrap(db.openBolt(ctx))
}
-func (db *DB) openBolt() error {
+func (db *DB) openBolt(ctx context.Context) error {
var err error
db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions)
if err != nil {
- return fmt.Errorf("can't open boltDB database: %w", err)
+ return fmt.Errorf("open boltDB database: %w", err)
}
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize
- db.log.Debug("opened boltDB instance for Metabase")
+ db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase)
- db.log.Debug("checking metabase version")
+ db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion)
return db.boltDB.View(func(tx *bbolt.Tx) error {
// The safest way to check if the metabase is fresh is to check if it has no buckets.
// However, shard info can be present. So here we check that the number of buckets is
@@ -77,8 +109,8 @@ func (db *DB) openBolt() error {
//
// Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state,
// use Reset.
-func (db *DB) Init() error {
- return db.init(false)
+func (db *DB) Init(_ context.Context) error {
+ return metaerr.Wrap(db.init(false))
}
// Reset resets metabase. Works similar to Init but cleans up all static buckets and
@@ -91,7 +123,7 @@ func (db *DB) Reset() error {
return ErrDegradedMode
}
- return db.init(true)
+ return metaerr.Wrap(db.init(true))
}
func (db *DB) init(reset bool) error {
@@ -99,15 +131,6 @@ func (db *DB) init(reset bool) error {
return nil
}
- mStaticBuckets := map[string]struct{}{
- string(containerVolumeBucketName): {},
- string(graveyardBucketName): {},
- string(toMoveItBucketName): {},
- string(garbageBucketName): {},
- string(shardInfoBucket): {},
- string(bucketNameLocked): {},
- }
-
return db.boltDB.Update(func(tx *bbolt.Tx) error {
var err error
if !reset {
@@ -122,34 +145,43 @@ func (db *DB) init(reset bool) error {
if reset {
err := tx.DeleteBucket(name)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("could not delete static bucket %s: %w", k, err)
+ return fmt.Errorf("delete static bucket %s: %w", k, err)
}
}
_, err := tx.CreateBucketIfNotExists(name)
if err != nil {
- return fmt.Errorf("could not create static bucket %s: %w", k, err)
+ return fmt.Errorf("create static bucket %s: %w", k, err)
}
}
- if !reset {
+ for _, b := range deprecatedBuckets {
+ err := tx.DeleteBucket(b)
+ if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
+ return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
+ }
+ }
+
+ if !reset { // counters will be recalculated by metabase refill
err = syncCounter(tx, false)
if err != nil {
- return fmt.Errorf("could not sync object counter: %w", err)
+ return fmt.Errorf("sync object counter: %w", err)
}
return nil
}
- err = tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
+ bucketCursor := tx.Cursor()
+ name, _ := bucketCursor.First()
+ for name != nil {
if _, ok := mStaticBuckets[string(name)]; !ok {
- return tx.DeleteBucket(name)
+ if err := tx.DeleteBucket(name); err != nil {
+ return err
+ }
+ name, _ = bucketCursor.Seek(name)
+ continue
}
-
- return nil
- })
- if err != nil {
- return err
+ name, _ = bucketCursor.Next()
}
return updateVersion(tx, version)
})
@@ -166,17 +198,26 @@ func (db *DB) SyncCounters() error {
return ErrReadOnlyMode
}
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
return syncCounter(tx, true)
- })
+ }))
}
-// Close closes boltDB instance.
-func (db *DB) Close() error {
+// Close closes the boltDB instance
+// and updates the metabase metrics.
+func (db *DB) Close(context.Context) error {
+ var err error
if db.boltDB != nil {
- return db.boltDB.Close()
+ err = db.close()
}
- return nil
+ if err == nil {
+ db.metrics.Close()
+ }
+ return err
+}
+
+func (db *DB) close() error {
+ return metaerr.Wrap(db.boltDB.Close())
}
// Reload reloads part of the configuration.
@@ -185,7 +226,7 @@ func (db *DB) Close() error {
// If there was a problem with applying new configuration, an error is returned.
//
// If the metabase couldn't be reopened because of an error, ErrDegradedMode is returned.
-func (db *DB) Reload(opts ...Option) (bool, error) {
+func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) {
var c cfg
for i := range opts {
opts[i](&c)
@@ -195,17 +236,19 @@ func (db *DB) Reload(opts ...Option) (bool, error) {
defer db.modeMtx.Unlock()
if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) {
- if err := db.Close(); err != nil {
+ if err := db.Close(ctx); err != nil {
return false, err
}
- db.mode = mode.Degraded
+ db.mode = mode.Disabled
+ db.metrics.SetMode(mode.ComponentDisabled)
db.info.Path = c.info.Path
- if err := db.openBolt(); err != nil {
- return false, fmt.Errorf("%w: %v", ErrDegradedMode, err)
+ if err := db.openBolt(ctx); err != nil {
+ return false, metaerr.Wrap(fmt.Errorf("%w: %v", ErrDegradedMode, err))
}
db.mode = mode.ReadWrite
+ db.metrics.SetMode(mode.ComponentReadWrite)
return true, nil
}
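Editor's note: taken together, the new control flow is that `Open` records the mode and skips bolt entirely for no-metabase modes, `Init` creates the static buckets and drops deprecated ones, and `Close` also finalizes metrics. A sketch of a typical caller, assuming the package's usual `New(...)` constructor:

```go
// Sketch, assuming meta.New is the package constructor (options elided).
db := meta.New( /* ...options... */ )
if err := db.Open(ctx, mode.ReadWrite); err != nil {
	return err
}
if err := db.Init(ctx); err != nil {
	return err
}
defer func() { _ = db.Close(ctx) }()
```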
diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go
index a98b45103b..d264026757 100644
--- a/pkg/local_object_storage/metabase/control_test.go
+++ b/pkg/local_object_storage/metabase/control_test.go
@@ -1,10 +1,13 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -12,11 +15,12 @@ import (
func TestReset(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
err := db.Reset()
require.NoError(t, err)
- obj := generateObject(t)
+ obj := testutil.GenerateObject()
addr := object.AddressOf(obj)
addrToInhume := oidtest.Address()
@@ -37,11 +41,11 @@ func TestReset(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
- err = metaInhume(db, addrToInhume, oidtest.Address())
+ err = metaInhume(db, addrToInhume, oidtest.ID())
require.NoError(t, err)
assertExists(addr, true, nil)
- assertExists(addrToInhume, false, meta.IsErrRemoved)
+ assertExists(addrToInhume, false, client.IsErrObjectAlreadyRemoved)
err = db.Reset()
require.NoError(t, err)
@@ -54,6 +58,6 @@ func metaExists(db *meta.DB, addr oid.Address) (bool, error) {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(addr)
- res, err := db.Exists(existsPrm)
+ res, err := db.Exists(context.Background(), existsPrm)
return res.Exists(), err
}
diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go
index a07328026a..732f99519e 100644
--- a/pkg/local_object_storage/metabase/counter.go
+++ b/pkg/local_object_storage/metabase/counter.go
@@ -1,16 +1,33 @@
package meta
import (
+ "bytes"
+ "context"
"encoding/binary"
+ "errors"
"fmt"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
-var objectPhyCounterKey = []byte("phy_counter")
-var objectLogicCounterKey = []byte("logic_counter")
+var (
+ objectPhyCounterKey = []byte("phy_counter")
+ objectLogicCounterKey = []byte("logic_counter")
+ objectUserCounterKey = []byte("user_counter")
+)
+
+var (
+ errInvalidKeyLength = errors.New("invalid key length")
+ errInvalidValueLength = errors.New("invalid value length")
+)
type objectType uint8
@@ -18,23 +35,19 @@ const (
_ objectType = iota
phy
logical
+ user
)
// ObjectCounters groups object counter
// according to metabase state.
type ObjectCounters struct {
- logic uint64
- phy uint64
+ Logic uint64
+ Phy uint64
+ User uint64
}
-// Logic returns logical object counter.
-func (o ObjectCounters) Logic() uint64 {
- return o.logic
-}
-
-// Phy returns physical object counter.
-func (o ObjectCounters) Phy() uint64 {
- return o.phy
+// IsZero reports whether all counters are zero.
+func (o ObjectCounters) IsZero() bool {
+ return o.Phy == 0 && o.Logic == 0 && o.User == 0
}
// ObjectCounters returns object counters that metabase has
@@ -55,29 +68,199 @@ func (db *DB) ObjectCounters() (cc ObjectCounters, err error) {
if b != nil {
data := b.Get(objectPhyCounterKey)
if len(data) == 8 {
- cc.phy = binary.LittleEndian.Uint64(data)
+ cc.Phy = binary.LittleEndian.Uint64(data)
}
data = b.Get(objectLogicCounterKey)
if len(data) == 8 {
- cc.logic = binary.LittleEndian.Uint64(data)
+ cc.Logic = binary.LittleEndian.Uint64(data)
+ }
+
+ data = b.Get(objectUserCounterKey)
+ if len(data) == 8 {
+ cc.User = binary.LittleEndian.Uint64(data)
}
}
return nil
})
- return
+ return cc, metaerr.Wrap(err)
}
-// updateCounter updates the object counter. Tx MUST be writable.
-// If inc == `true`, increases the counter, decreases otherwise.
-func (db *DB) updateCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool) error {
+// ContainerCounters groups per-container object counters.
+type ContainerCounters struct {
+ Counts map[cid.ID]ObjectCounters
+}
+
+// ContainerCounters returns object counters for each container
+// that metabase has tracked since it was opened and initialized.
+//
+// Returns only errors that prevent reading the counters
+// from the Bolt database.
+//
+// It is guaranteed that the ContainerCounters fields are not nil.
+func (db *DB) ContainerCounters(ctx context.Context) (ContainerCounters, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("ContainerCounters", time.Since(startedAt), success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ContainerCounters")
+ defer span.End()
+
+ cc := ContainerCounters{
+ Counts: make(map[cid.ID]ObjectCounters),
+ }
+
+ lastKey := make([]byte, cidSize)
+
+ // there is no limit on the number of containers, so iterate in batches with cancellation checks
+ for {
+ select {
+ case <-ctx.Done():
+ return cc, ctx.Err()
+ default:
+ }
+
+ completed, err := db.containerCountersNextBatch(lastKey, func(id cid.ID, entity ObjectCounters) {
+ cc.Counts[id] = entity
+ })
+ if err != nil {
+ return cc, err
+ }
+ if completed {
+ break
+ }
+ }
+
+ success = true
+ return cc, nil
+}
+
+func (db *DB) containerCountersNextBatch(lastKey []byte, f func(id cid.ID, entity ObjectCounters)) (bool, error) {
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return false, ErrDegradedMode
+ }
+
+ counter := 0
+ const batchSize = 1000
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(containerCounterBucketName)
+ if b == nil {
+ return ErrInterruptIterator
+ }
+ c := b.Cursor()
+ var key, value []byte
+ for key, value = c.Seek(lastKey); key != nil; key, value = c.Next() {
+ if bytes.Equal(lastKey, key) {
+ continue
+ }
+ copy(lastKey, key)
+
+ cnrID, err := parseContainerCounterKey(key)
+ if err != nil {
+ return err
+ }
+ ent, err := parseContainerCounterValue(value)
+ if err != nil {
+ return err
+ }
+ f(cnrID, ent)
+
+ counter++
+ if counter == batchSize {
+ break
+ }
+ }
+
+ if counter < batchSize { // last batch
+ return ErrInterruptIterator
+ }
+ return nil
+ })
+ if err != nil {
+ if errors.Is(err, ErrInterruptIterator) {
+ return true, nil
+ }
+ return false, metaerr.Wrap(err)
+ }
+ return false, nil
+}
+
+// ContainerCount returns object counters for the given container.
+func (db *DB) ContainerCount(ctx context.Context, id cid.ID) (ObjectCounters, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("ContainerCount", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.ContainerCount")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ObjectCounters{}, ErrDegradedMode
+ }
+
+ var result ObjectCounters
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(containerCounterBucketName)
+ key := make([]byte, cidSize)
+ id.Encode(key)
+ v := b.Get(key)
+ if v == nil {
+ return nil
+ }
+ var err error
+ result, err = parseContainerCounterValue(v)
+ return err
+ })
+
+ return result, metaerr.Wrap(err)
+}
+
+func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
+ b := tx.Bucket(shardInfoBucket)
+ if b == nil {
+ return db.incContainerObjectCounter(tx, cnrID, isUserObject)
+ }
+
+ if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
+ return fmt.Errorf("increase phy object counter: %w", err)
+ }
+ if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
+ return fmt.Errorf("increase logical object counter: %w", err)
+ }
+ if isUserObject {
+ if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
+ return fmt.Errorf("increase user object counter: %w", err)
+ }
+ }
+ return db.incContainerObjectCounter(tx, cnrID, isUserObject)
+}
+
+func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error {
b := tx.Bucket(shardInfoBucket)
if b == nil {
return nil
}
+ return db.updateShardObjectCounterBucket(b, typ, delta, false)
+}
+
+func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error {
var counter uint64
var counterKey []byte
@@ -86,6 +269,8 @@ func (db *DB) updateCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool
counterKey = objectPhyCounterKey
case logical:
counterKey = objectLogicCounterKey
+ case user:
+ counterKey = objectUserCounterKey
default:
panic("unknown object type counter")
}
@@ -109,6 +294,65 @@ func (db *DB) updateCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool
return b.Put(counterKey, newCounter)
}
+func (db *DB) updateContainerCounter(tx *bbolt.Tx, delta map[cid.ID]ObjectCounters, inc bool) error {
+ b := tx.Bucket(containerCounterBucketName)
+ if b == nil {
+ return nil
+ }
+
+ key := make([]byte, cidSize)
+ for cnrID, cnrDelta := range delta {
+ cnrID.Encode(key)
+ if err := db.editContainerCounterValue(b, key, cnrDelta, inc); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (*DB) editContainerCounterValue(b *bbolt.Bucket, key []byte, delta ObjectCounters, inc bool) error {
+ var entity ObjectCounters
+ var err error
+ data := b.Get(key)
+ if len(data) > 0 {
+ entity, err = parseContainerCounterValue(data)
+ if err != nil {
+ return err
+ }
+ }
+ entity.Phy = nextValue(entity.Phy, delta.Phy, inc)
+ entity.Logic = nextValue(entity.Logic, delta.Logic, inc)
+ entity.User = nextValue(entity.User, delta.User, inc)
+ value := containerCounterValue(entity)
+ return b.Put(key, value)
+}
+
+func nextValue(existed, delta uint64, inc bool) uint64 {
+ if inc {
+ existed += delta
+ } else if existed <= delta {
+ existed = 0
+ } else {
+ existed -= delta
+ }
+ return existed
+}
+
+func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
+ b := tx.Bucket(containerCounterBucketName)
+ if b == nil {
+ return nil
+ }
+
+ key := make([]byte, cidSize)
+ cnrID.Encode(key)
+ c := ObjectCounters{Logic: 1, Phy: 1}
+ if isUserObject {
+ c.User = 1
+ }
+ return db.editContainerCounterValue(b, key, c, true)
+}
+
// syncCounter updates object counters according to metabase state:
// it counts all the physically/logically stored objects using internal
// indexes. Tx MUST be writable.
@@ -116,57 +360,390 @@ func (db *DB) updateCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool
// Does nothing if counters are not empty and force is false. If force is
// true, updates the counters anyway.
func syncCounter(tx *bbolt.Tx, force bool) error {
- b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
+ shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket)
if err != nil {
- return fmt.Errorf("could not get shard info bucket: %w", err)
+ return fmt.Errorf("get shard info bucket: %w", err)
}
-
- if !force && len(b.Get(objectPhyCounterKey)) == 8 && len(b.Get(objectLogicCounterKey)) == 8 {
+ shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 &&
+ len(shardInfoB.Get(objectLogicCounterKey)) == 8 &&
+ len(shardInfoB.Get(objectUserCounterKey)) == 8
+ containerObjectCounterInitialized := containerObjectCounterInitialized(tx)
+ if !force && shardObjectCounterInitialized && containerObjectCounterInitialized {
// the counters are already initialized
return nil
}
+ containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName)
+ if err != nil {
+ return fmt.Errorf("get container counter bucket: %w", err)
+ }
+
var addr oid.Address
- var phyCounter uint64
- var logicCounter uint64
+ counters := make(map[cid.ID]ObjectCounters)
graveyardBKT := tx.Bucket(graveyardBucketName)
garbageBKT := tx.Bucket(garbageBucketName)
key := make([]byte, addressKeySize)
+ var isAvailable bool
- err = iteratePhyObjects(tx, func(cnr cid.ID, obj oid.ID) error {
- phyCounter++
+ err = iteratePhyObjects(tx, func(cnr cid.ID, objID oid.ID, obj *objectSDK.Object) error {
+ if v, ok := counters[cnr]; ok {
+ v.Phy++
+ counters[cnr] = v
+ } else {
+ counters[cnr] = ObjectCounters{
+ Phy: 1,
+ }
+ }
addr.SetContainer(cnr)
- addr.SetObject(obj)
+ addr.SetObject(objID)
+ isAvailable = false
// check if an object is available: not with GCMark
// and not covered with a tombstone
if inGraveyardWithKey(addressKey(addr, key), graveyardBKT, garbageBKT) == 0 {
- logicCounter++
+ if v, ok := counters[cnr]; ok {
+ v.Logic++
+ counters[cnr] = v
+ } else {
+ counters[cnr] = ObjectCounters{
+ Logic: 1,
+ }
+ }
+ isAvailable = true
+ }
+
+ if isAvailable && IsUserObject(obj) {
+ if v, ok := counters[cnr]; ok {
+ v.User++
+ counters[cnr] = v
+ } else {
+ counters[cnr] = ObjectCounters{
+ User: 1,
+ }
+ }
}
return nil
})
if err != nil {
- return fmt.Errorf("could not iterate objects: %w", err)
+ return fmt.Errorf("iterate objects: %w", err)
}
- data := make([]byte, 8)
- binary.LittleEndian.PutUint64(data, phyCounter)
+ return setObjectCounters(counters, shardInfoB, containerCounterB)
+}
- err = b.Put(objectPhyCounterKey, data)
+func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, containerCounterB *bbolt.Bucket) error {
+ var phyTotal uint64
+ var logicTotal uint64
+ var userTotal uint64
+ key := make([]byte, cidSize)
+ for cnrID, count := range counters {
+ phyTotal += count.Phy
+ logicTotal += count.Logic
+ userTotal += count.User
+
+ cnrID.Encode(key)
+ value := containerCounterValue(count)
+ err := containerCounterB.Put(key, value)
+ if err != nil {
+ return fmt.Errorf("update phy container object counter: %w", err)
+ }
+ }
+ phyData := make([]byte, 8)
+ binary.LittleEndian.PutUint64(phyData, phyTotal)
+
+ err := shardInfoB.Put(objectPhyCounterKey, phyData)
if err != nil {
- return fmt.Errorf("could not update phy object counter: %w", err)
+ return fmt.Errorf("update phy object counter: %w", err)
}
- data = make([]byte, 8)
- binary.LittleEndian.PutUint64(data, logicCounter)
+ logData := make([]byte, 8)
+ binary.LittleEndian.PutUint64(logData, logicTotal)
- err = b.Put(objectLogicCounterKey, data)
+ err = shardInfoB.Put(objectLogicCounterKey, logData)
if err != nil {
- return fmt.Errorf("could not update logic object counter: %w", err)
+ return fmt.Errorf("update logic object counter: %w", err)
+ }
+
+ userData := make([]byte, 8)
+ binary.LittleEndian.PutUint64(userData, userTotal)
+
+ err = shardInfoB.Put(objectUserCounterKey, userData)
+ if err != nil {
+ return fmt.Errorf("update user object counter: %w", err)
}
return nil
}
+
+func containerCounterValue(entity ObjectCounters) []byte {
+ res := make([]byte, 24)
+ binary.LittleEndian.PutUint64(res, entity.Phy)
+ binary.LittleEndian.PutUint64(res[8:], entity.Logic)
+ binary.LittleEndian.PutUint64(res[16:], entity.User)
+ return res
+}
+
+func parseContainerCounterKey(buf []byte) (cid.ID, error) {
+ if len(buf) != cidSize {
+ return cid.ID{}, errInvalidKeyLength
+ }
+ var cnrID cid.ID
+ if err := cnrID.Decode(buf); err != nil {
+ return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
+ }
+ return cnrID, nil
+}
+
+// parseContainerCounterValue returns the phy, logic and user counter values.
+func parseContainerCounterValue(buf []byte) (ObjectCounters, error) {
+ if len(buf) != 24 {
+ return ObjectCounters{}, errInvalidValueLength
+ }
+ return ObjectCounters{
+ Phy: binary.LittleEndian.Uint64(buf),
+ Logic: binary.LittleEndian.Uint64(buf[8:16]),
+ User: binary.LittleEndian.Uint64(buf[16:]),
+ }, nil
+}
+
+func containerObjectCounterInitialized(tx *bbolt.Tx) bool {
+ b := tx.Bucket(containerCounterBucketName)
+ if b == nil {
+ return false
+ }
+ k, v := b.Cursor().First()
+ if k == nil && v == nil {
+ return true
+ }
+ _, err := parseContainerCounterKey(k)
+ if err != nil {
+ return false
+ }
+ _, err = parseContainerCounterValue(v)
+ return err == nil
+}
+
+// IsUserObject reports whether obj should be counted as a user object:
+// a regular object that is either not split or the split/EC part carrying
+// the parent information.
+func IsUserObject(obj *objectSDK.Object) bool {
+ ech := obj.ECHeader()
+ if ech == nil {
+ _, hasParentID := obj.ParentID()
+ return obj.Type() == objectSDK.TypeRegular &&
+ (obj.SplitID() == nil ||
+ (hasParentID && len(obj.Children()) == 0))
+ }
+ return ech.Index() == 0 && (ech.ParentSplitID() == nil || ech.ParentSplitParentID() != nil)
+}
+
+// ZeroSizeContainers returns containers with size = 0.
+func (db *DB) ZeroSizeContainers(ctx context.Context) ([]cid.ID, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("ZeroSizeContainers", time.Since(startedAt), success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ZeroSizeContainers")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ var result []cid.ID
+ lastKey := make([]byte, cidSize)
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ completed, err := db.containerSizesNextBatch(lastKey, func(contID cid.ID, size uint64) {
+ if size == 0 {
+ result = append(result, contID)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+ if completed {
+ break
+ }
+ }
+
+ success = true
+ return result, nil
+}
+
+func (db *DB) containerSizesNextBatch(lastKey []byte, f func(cid.ID, uint64)) (bool, error) {
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return false, ErrDegradedMode
+ }
+
+ counter := 0
+ const batchSize = 1000
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(containerVolumeBucketName)
+ c := b.Cursor()
+ var key, value []byte
+ for key, value = c.Seek(lastKey); key != nil; key, value = c.Next() {
+ if bytes.Equal(lastKey, key) {
+ continue
+ }
+ copy(lastKey, key)
+
+ size := parseContainerSize(value)
+ var id cid.ID
+ if err := id.Decode(key); err != nil {
+ return err
+ }
+ f(id, size)
+
+ counter++
+ if counter == batchSize {
+ break
+ }
+ }
+
+ if counter < batchSize {
+ return ErrInterruptIterator
+ }
+ return nil
+ })
+ if err != nil {
+ if errors.Is(err, ErrInterruptIterator) {
+ return true, nil
+ }
+ return false, metaerr.Wrap(err)
+ }
+ return false, nil
+}
+
+// DeleteContainerSize removes the accounted size record for the given container.
+func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("DeleteContainerSize", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.DeleteContainerSize",
+ trace.WithAttributes(
+ attribute.Stringer("container_id", id),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ if db.mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(containerVolumeBucketName)
+
+ key := make([]byte, cidSize)
+ id.Encode(key)
+ return b.Delete(key)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+// ZeroCountContainers returns containers with object count = 0 in the metabase.
+func (db *DB) ZeroCountContainers(ctx context.Context) ([]cid.ID, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("ZeroCountContainers", time.Since(startedAt), success)
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ZeroCountContainers")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ var result []cid.ID
+
+ lastKey := make([]byte, cidSize)
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ completed, err := db.containerCountersNextBatch(lastKey, func(id cid.ID, entity ObjectCounters) {
+ if entity.IsZero() {
+ result = append(result, id)
+ }
+ })
+ if err != nil {
+ return nil, metaerr.Wrap(err)
+ }
+ if completed {
+ break
+ }
+ }
+ success = true
+ return result, nil
+}
+
+// DeleteContainerCount removes the object counter record for the given container.
+func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("DeleteContainerCount", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.DeleteContainerCount",
+ trace.WithAttributes(
+ attribute.Stringer("container_id", id),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ if db.mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(containerCounterBucketName)
+
+ key := make([]byte, cidSize)
+ id.Encode(key)
+ return b.Delete(key)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
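Editor's note: each per-container counter record is a fixed 24-byte value — three little-endian uint64s at offsets 0 (phy), 8 (logic) and 16 (user) — which is exactly what `errInvalidValueLength` guards. A worked round-trip under those definitions:

```go
// Round-trip of the 24-byte counter encoding defined above.
v := containerCounterValue(ObjectCounters{Phy: 3, Logic: 2, User: 1})
// v[0:8]   = 03 00 00 00 00 00 00 00  (phy, little-endian)
// v[8:16]  = 02 00 00 00 00 00 00 00  (logic)
// v[16:24] = 01 00 00 00 00 00 00 00  (user)
cc, err := parseContainerCounterValue(v) // cc == {Phy:3, Logic:2, User:1}, err == nil
_, _ = cc, err
```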
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index 9729deb297..950385a297 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -1,11 +1,14 @@
package meta_test
import (
+ "context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -14,66 +17,122 @@ import (
const objCount = 10
func TestCounters(t *testing.T) {
- db := newDB(t)
-
- var c meta.ObjectCounters
- var err error
+ t.Parallel()
t.Run("defaults", func(t *testing.T) {
- c, err = db.ObjectCounters()
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Zero(t, c.Phy())
- require.Zero(t, c.Logic())
+ require.Zero(t, c.Phy)
+ require.Zero(t, c.Logic)
+ require.Zero(t, c.User)
+
+ cc, err := db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+ require.Zero(t, len(cc.Counts))
})
t.Run("put", func(t *testing.T) {
- oo := make([]*object.Object, 0, objCount)
- for i := 0; i < objCount; i++ {
- oo = append(oo, generateObject(t))
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ oo := make([]*objectSDK.Object, 0, objCount)
+ for range objCount {
+ oo = append(oo, testutil.GenerateObject())
}
var prm meta.PutPrm
+ exp := make(map[cid.ID]meta.ObjectCounters)
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
prm.SetObject(oo[i])
+ cnrID, _ := oo[i].ContainerID()
+ c := meta.ObjectCounters{}
+ exp[cnrID] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
- _, err = db.Put(prm)
+ _, err := db.Put(context.Background(), prm)
require.NoError(t, err)
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i+1), c.Phy())
- require.Equal(t, uint64(i+1), c.Logic())
+ require.Equal(t, uint64(i+1), c.Phy)
+ require.Equal(t, uint64(i+1), c.Logic)
+
+ cc, err := db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
}
})
- require.NoError(t, db.Reset())
-
t.Run("delete", func(t *testing.T) {
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, false)
+ exp := make(map[cid.ID]meta.ObjectCounters)
+ for _, obj := range oo {
+ cnrID, _ := obj.ContainerID()
+ exp[cnrID] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
+ }
+
var prm meta.DeletePrm
for i := objCount - 1; i >= 0; i-- {
prm.SetAddresses(objectcore.AddressOf(oo[i]))
- res, err := db.Delete(prm)
+ res, err := db.Delete(context.Background(), prm)
require.NoError(t, err)
- require.Equal(t, uint64(1), res.AvailableObjectsRemoved())
+ require.Equal(t, uint64(1), res.LogicCount())
- c, err = db.ObjectCounters()
+ c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i), c.Phy())
- require.Equal(t, uint64(i), c.Logic())
+ require.Equal(t, uint64(i), c.Phy)
+ require.Equal(t, uint64(i), c.Logic)
+ require.Equal(t, uint64(i), c.User)
+
+ cnrID, _ := oo[i].ContainerID()
+ if v, ok := exp[cnrID]; ok {
+ v.Phy--
+ v.Logic--
+ v.User--
+ exp[cnrID] = v
+ }
+
+ cc, err := db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
}
})
- require.NoError(t, db.Reset())
-
t.Run("inhume", func(t *testing.T) {
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, false)
+ exp := make(map[cid.ID]meta.ObjectCounters)
+ for _, obj := range oo {
+ cnrID, _ := obj.ContainerID()
+ exp[cnrID] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
+ }
+
inhumedObjs := make([]oid.Address, objCount/2)
for i, o := range oo {
@@ -84,66 +143,141 @@ func TestCounters(t *testing.T) {
inhumedObjs[i] = objectcore.AddressOf(o)
}
+ for _, addr := range inhumedObjs {
+ if v, ok := exp[addr.Container()]; ok {
+ v.Logic--
+ v.User--
+ if v.IsZero() {
+ delete(exp, addr.Container())
+ } else {
+ exp[addr.Container()] = v
+ }
+ }
+ }
+
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- res, err := db.Inhume(prm)
- require.NoError(t, err)
- require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed())
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
- c, err = db.ObjectCounters()
+ res, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), res.LogicInhumed())
+ require.Equal(t, uint64(1), res.UserInhumed())
+ }
+
+ c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy())
- require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic())
+ require.Equal(t, uint64(objCount), c.Phy)
+ require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic)
+ require.Equal(t, uint64(objCount-len(inhumedObjs)), c.User)
+
+ cc, err := db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
})
- require.NoError(t, db.Reset())
-
t.Run("put_split", func(t *testing.T) {
- parObj := generateObject(t)
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ parObj := testutil.GenerateObject()
+
+ exp := make(map[cid.ID]meta.ObjectCounters)
// put objects and check that parent info
// does not affect the counter
- for i := 0; i < objCount; i++ {
- o := generateObject(t)
+ for i := range objCount {
+ o := testutil.GenerateObject()
if i < objCount/2 { // half of the objs will have the parent
o.SetParent(parObj)
+ o.SetSplitID(objectSDK.NewSplitID())
+ }
+
+ cnrID, _ := o.ContainerID()
+ exp[cnrID] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
}
require.NoError(t, putBig(db, o))
- c, err = db.ObjectCounters()
+ c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i+1), c.Phy())
- require.Equal(t, uint64(i+1), c.Logic())
+ require.Equal(t, uint64(i+1), c.Phy)
+ require.Equal(t, uint64(i+1), c.Logic)
+ require.Equal(t, uint64(i+1), c.User)
+
+ cc, err := db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
}
})
- require.NoError(t, db.Reset())
-
t.Run("delete_split", func(t *testing.T) {
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, true)
+ exp := make(map[cid.ID]meta.ObjectCounters)
+ for _, obj := range oo {
+ cnrID, _ := obj.ContainerID()
+ exp[cnrID] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
+ }
+
// delete objects that have parent info
// and check that it does not affect
// the counter
for i, o := range oo {
- require.NoError(t, metaDelete(db, objectcore.AddressOf(o)))
+ addr := objectcore.AddressOf(o)
+ require.NoError(t, metaDelete(db, addr))
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount-i-1), c.Phy())
- require.Equal(t, uint64(objCount-i-1), c.Logic())
+ require.Equal(t, uint64(objCount-i-1), c.Phy)
+ require.Equal(t, uint64(objCount-i-1), c.Logic)
+ require.Equal(t, uint64(objCount-i-1), c.User)
+
+ if v, ok := exp[addr.Container()]; ok {
+ v.Logic--
+ v.Phy--
+ v.User--
+ if v.IsZero() {
+ delete(exp, addr.Container())
+ } else {
+ exp[addr.Container()] = v
+ }
+ }
}
})
- require.NoError(t, db.Reset())
-
t.Run("inhume_split", func(t *testing.T) {
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, true)
+ exp := make(map[cid.ID]meta.ObjectCounters)
+ for _, obj := range oo {
+ cnrID, _ := obj.ContainerID()
+ exp[cnrID] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
+ }
+
inhumedObjs := make([]oid.Address, objCount/2)
for i, o := range oo {
@@ -154,21 +288,93 @@ func TestCounters(t *testing.T) {
inhumedObjs[i] = objectcore.AddressOf(o)
}
+ for _, addr := range inhumedObjs {
+ if v, ok := exp[addr.Container()]; ok {
+ v.Logic--
+ v.User--
+ if v.IsZero() {
+ delete(exp, addr.Container())
+ } else {
+ exp[addr.Container()] = v
+ }
+ }
+ }
+
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- _, err = db.Inhume(prm)
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
+
+ _, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
+
+ c, err := db.ObjectCounters()
require.NoError(t, err)
- c, err = db.ObjectCounters()
+ require.Equal(t, uint64(objCount), c.Phy)
+ require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic)
+ require.Equal(t, uint64(objCount-len(inhumedObjs)), c.User)
+
+ cc, err := db.ContainerCounters(context.Background())
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy())
- require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic())
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
})
}
+func TestDoublePut(t *testing.T) {
+ t.Parallel()
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ obj := testutil.GenerateObject()
+
+ exp := make(map[cid.ID]meta.ObjectCounters)
+ cnrID, _ := obj.ContainerID()
+ exp[cnrID] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
+
+ var prm meta.PutPrm
+ prm.SetObject(obj)
+ pr, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+ require.True(t, pr.Inserted)
+
+ c, err := db.ObjectCounters()
+ require.NoError(t, err)
+
+ require.Equal(t, uint64(1), c.Phy)
+ require.Equal(t, uint64(1), c.Logic)
+ require.Equal(t, uint64(1), c.User)
+
+ cc, err := db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+
+ pr, err = db.Put(context.Background(), prm)
+ require.NoError(t, err)
+ require.False(t, pr.Inserted)
+
+ c, err = db.ObjectCounters()
+ require.NoError(t, err)
+
+ require.Equal(t, uint64(1), c.Phy)
+ require.Equal(t, uint64(1), c.Logic)
+ require.Equal(t, uint64(1), c.User)
+
+ cc, err = db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+}
+
func TestCounters_Expired(t *testing.T) {
// That test is about expired objects without
// GCMark yet. Such objects should be treated as
@@ -181,18 +387,34 @@ func TestCounters_Expired(t *testing.T) {
es := &epochState{epoch}
db := newDB(t, meta.WithEpochState(es))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := make([]oid.Address, objCount)
for i := range oo {
- oo[i] = putWithExpiration(t, db, object.TypeRegular, epoch+1)
+ oo[i] = putWithExpiration(t, db, objectSDK.TypeRegular, epoch+1)
+ }
+
+ exp := make(map[cid.ID]meta.ObjectCounters)
+ for _, addr := range oo {
+ exp[addr.Container()] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
}
// 1. objects are available and counters are correct
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy())
- require.Equal(t, uint64(objCount), c.Logic())
+ require.Equal(t, uint64(objCount), c.Phy)
+ require.Equal(t, uint64(objCount), c.Logic)
+ require.Equal(t, uint64(objCount), c.User)
+
+ cc, err := db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
for _, o := range oo {
_, err := metaGet(db, o, true)
@@ -206,8 +428,14 @@ func TestCounters_Expired(t *testing.T) {
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy())
- require.Equal(t, uint64(objCount), c.Logic())
+ require.Equal(t, uint64(objCount), c.Phy)
+ require.Equal(t, uint64(objCount), c.Logic)
+ require.Equal(t, uint64(objCount), c.User)
+
+ cc, err = db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
for _, o := range oo {
_, err := metaGet(db, o, true)
@@ -222,15 +450,32 @@ func TestCounters_Expired(t *testing.T) {
inhumePrm.SetGCMark()
inhumePrm.SetAddresses(oo[0])
- inhumeRes, err := db.Inhume(inhumePrm)
+ inhumeRes, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- require.Equal(t, uint64(1), inhumeRes.AvailableInhumed())
+ require.Equal(t, uint64(1), inhumeRes.LogicInhumed())
+ require.Equal(t, uint64(1), inhumeRes.UserInhumed())
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(len(oo)), c.Phy())
- require.Equal(t, uint64(len(oo)-1), c.Logic())
+ require.Equal(t, uint64(len(oo)), c.Phy)
+ require.Equal(t, uint64(len(oo)-1), c.Logic)
+ require.Equal(t, uint64(len(oo)-1), c.User)
+
+ if v, ok := exp[oo[0].Container()]; ok {
+ v.Logic--
+ v.User--
+ if v.IsZero() {
+ delete(exp, oo[0].Container())
+ } else {
+ exp[oo[0].Container()] = v
+ }
+ }
+
+ cc, err = db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
// 4. `Delete` an object with GCMark should decrease the
// phy counter but does not affect the logic counter (after
@@ -239,16 +484,28 @@ func TestCounters_Expired(t *testing.T) {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(oo[0])
- deleteRes, err := db.Delete(deletePrm)
+ deleteRes, err := db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
- require.Zero(t, deleteRes.AvailableObjectsRemoved())
+ require.Zero(t, deleteRes.LogicCount())
+ require.Zero(t, deleteRes.UserCount())
+
+ if v, ok := exp[oo[0].Container()]; ok {
+ v.Phy--
+ exp[oo[0].Container()] = v
+ }
oo = oo[1:]
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(len(oo)), c.Phy())
- require.Equal(t, uint64(len(oo)), c.Logic())
+ require.Equal(t, uint64(len(oo)), c.Phy)
+ require.Equal(t, uint64(len(oo)), c.Logic)
+ require.Equal(t, uint64(len(oo)), c.User)
+
+ cc, err = db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
// 5 `Delete` an expired object (like it would the control
// service do) should decrease both counters despite the
@@ -256,41 +513,56 @@ func TestCounters_Expired(t *testing.T) {
deletePrm.SetAddresses(oo[0])
- deleteRes, err = db.Delete(deletePrm)
+ deleteRes, err = db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
- require.Equal(t, uint64(1), deleteRes.AvailableObjectsRemoved())
+ require.Equal(t, uint64(1), deleteRes.LogicCount())
+ require.Equal(t, uint64(1), deleteRes.UserCount())
+
+ if v, ok := exp[oo[0].Container()]; ok {
+ v.Phy--
+ v.Logic--
+ v.User--
+ exp[oo[0].Container()] = v
+ }
oo = oo[1:]
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(len(oo)), c.Phy())
- require.Equal(t, uint64(len(oo)), c.Logic())
+ require.Equal(t, uint64(len(oo)), c.Phy)
+ require.Equal(t, uint64(len(oo)), c.Logic)
+ require.Equal(t, uint64(len(oo)), c.User)
+
+ cc, err = db.ContainerCounters(context.Background())
+ require.NoError(t, err)
+
+ require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
}
-func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*object.Object {
+func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK.Object {
var prm meta.PutPrm
var err error
- parent := generateObject(t)
+ parent := testutil.GenerateObject()
- oo := make([]*object.Object, 0, count)
- for i := 0; i < count; i++ {
- o := generateObject(t)
+ oo := make([]*objectSDK.Object, 0, count)
+ for i := range count {
+ o := testutil.GenerateObject()
if withParent {
o.SetParent(parent)
+ o.SetSplitID(objectSDK.NewSplitID())
}
oo = append(oo, o)
prm.SetObject(o)
- _, err = db.Put(prm)
+ _, err = db.Put(context.Background(), prm)
require.NoError(t, err)
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i+1), c.Phy())
- require.Equal(t, uint64(i+1), c.Logic())
+ require.Equal(t, uint64(i+1), c.Phy)
+ require.Equal(t, uint64(i+1), c.Logic)
}
return oo
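
The counters tests above maintain a per-container expectation map by hand: decrement each affected counter and drop the entry once everything reaches zero, because ContainerCounters omits fully drained containers. A minimal helper sketch of that bookkeeping (hypothetical, not part of this change; only meta.ObjectCounters and its IsZero method are taken from the diff):

package meta_test

import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// decExpected mirrors the inline bookkeeping: decrement the expected
// counters of a container and prune the entry once all of them are zero.
func decExpected(exp map[cid.ID]meta.ObjectCounters, cnr cid.ID, phy, logic, user uint64) {
	v, ok := exp[cnr]
	if !ok {
		return
	}
	v.Phy -= phy
	v.Logic -= logic
	v.User -= user
	if v.IsZero() {
		delete(exp, cnr)
	} else {
		exp[cnr] = v
	}
}
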
diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go
index 5a9ca3aa95..4474aa2291 100644
--- a/pkg/local_object_storage/metabase/db.go
+++ b/pkg/local_object_storage/metabase/db.go
@@ -11,10 +11,10 @@ import (
"sync"
"time"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/mr-tron/base58"
"go.etcd.io/bbolt"
"go.uber.org/zap"
@@ -39,7 +39,7 @@ type DB struct {
modeMtx sync.RWMutex
mode mode.Mode
- matchers map[object.SearchMatchType]matcher
+ matchers map[objectSDK.SearchMatchType]matcher
boltDB *bbolt.DB
@@ -60,6 +60,7 @@ type cfg struct {
log *logger.Logger
epochState EpochState
+ metrics Metrics
}
func defaultCfg() *cfg {
@@ -69,7 +70,8 @@ func defaultCfg() *cfg {
},
boltBatchDelay: bbolt.DefaultMaxBatchDelay,
boltBatchSize: bbolt.DefaultMaxBatchSize,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
+ metrics: &noopMetrics{},
}
}
@@ -87,24 +89,25 @@ func New(opts ...Option) *DB {
return &DB{
cfg: c,
- matchers: map[object.SearchMatchType]matcher{
- object.MatchUnknown: {
+ matchers: map[objectSDK.SearchMatchType]matcher{
+ objectSDK.MatchUnknown: {
matchSlow: unknownMatcher,
matchBucket: unknownMatcherBucket,
},
- object.MatchStringEqual: {
+ objectSDK.MatchStringEqual: {
matchSlow: stringEqualMatcher,
matchBucket: stringEqualMatcherBucket,
},
- object.MatchStringNotEqual: {
+ objectSDK.MatchStringNotEqual: {
matchSlow: stringNotEqualMatcher,
matchBucket: stringNotEqualMatcherBucket,
},
- object.MatchCommonPrefix: {
+ objectSDK.MatchCommonPrefix: {
matchSlow: stringCommonPrefixMatcher,
matchBucket: stringCommonPrefixMatcherBucket,
},
},
+ mode: mode.Disabled,
}
}
@@ -260,7 +263,7 @@ func unknownMatcherBucket(_ *bbolt.Bucket, _ string, _ string, _ func([]byte, []
// in boltDB. Useful for getting filter values from unique and list indexes.
func bucketKeyHelper(hdr string, val string) []byte {
switch hdr {
- case v2object.FilterHeaderParent:
+ case v2object.FilterHeaderParent, v2object.FilterHeaderECParent:
v, err := base58.Decode(val)
if err != nil {
return nil
@@ -274,7 +277,7 @@ func bucketKeyHelper(hdr string, val string) []byte {
return v
case v2object.FilterHeaderSplitID:
- s := object.NewSplitID()
+ s := objectSDK.NewSplitID()
err := s.Parse(val)
if err != nil {
@@ -292,6 +295,11 @@ func (db *DB) SetLogger(l *logger.Logger) {
db.log = l
}
+// SetParentID sets the parent ID on nested components. It is called after the shard ID has been generated so that the ID can be used in logs and metrics.
+func (db *DB) SetParentID(parentID string) {
+ db.metrics.SetParentID(parentID)
+}
+
// WithLogger returns option to set logger of DB.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
@@ -349,3 +357,10 @@ func WithEpochState(s EpochState) Option {
c.epochState = s
}
}
+
+// WithMetrics returns option to specify metrics collector.
+func WithMetrics(m Metrics) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
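
The new metrics hooks are optional: the metabase defaults to a no-op collector, and SetParentID only forwards the generated shard ID to whatever collector was injected. A sketch of wiring a custom collector (assumes meta.Metrics declares at least the two methods exercised in this diff, SetParentID and AddMethodDuration; the real interface may be wider):

package main

import (
	"time"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// nopCollector is a stand-in implementation; a real one would record the
// observations in counters or histograms.
type nopCollector struct{}

func (nopCollector) SetParentID(parentID string)                                    {}
func (nopCollector) AddMethodDuration(method string, d time.Duration, success bool) {}

func newMeteredDB(path string, es meta.EpochState, shardID string) *meta.DB {
	db := meta.New(
		meta.WithPath(path),
		meta.WithEpochState(es),
		meta.WithMetrics(nopCollector{}),
	)
	db.SetParentID(shardID) // forwarded to the collector for labeling
	return db
}
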
diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go
index 3e70a30bac..edaeb13c5c 100644
--- a/pkg/local_object_storage/metabase/db_test.go
+++ b/pkg/local_object_storage/metabase/db_test.go
@@ -1,23 +1,18 @@
package meta_test
import (
- "os"
+ "context"
+ "path/filepath"
"strconv"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
"github.com/stretchr/testify/require"
)
@@ -32,12 +27,22 @@ func (s epochState) CurrentEpoch() uint64 {
}
// saves "big" object in DB.
-func putBig(db *meta.DB, obj *object.Object) error {
+func putBig(db *meta.DB, obj *objectSDK.Object) error {
return metaPut(db, obj, nil)
}
-func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs object.SearchFilters, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs)
+func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) {
+ res, err := metaSelect(db, cnr, fs, false)
+ require.NoError(t, err)
+ require.Len(t, res, len(exp))
+
+ for i := range exp {
+ require.Contains(t, res, exp[i])
+ }
+}
+
+func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) {
+ res, err := metaSelect(db, cnr, fs, useAttrIndex)
require.NoError(t, err)
require.Len(t, res, len(exp))
@@ -47,70 +52,27 @@ func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs object.SearchFilters,
}
func newDB(t testing.TB, opts ...meta.Option) *meta.DB {
- path := t.Name()
-
bdb := meta.New(
append([]meta.Option{
- meta.WithPath(path),
- meta.WithPermissions(0600),
+ meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+ meta.WithPermissions(0o600),
meta.WithEpochState(epochState{}),
}, opts...)...,
)
- require.NoError(t, bdb.Open(false))
- require.NoError(t, bdb.Init())
-
- t.Cleanup(func() {
- bdb.Close()
- os.Remove(bdb.DumpInfo().Path)
- })
+ require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, bdb.Init(context.Background()))
return bdb
}
-func generateObject(t testing.TB) *object.Object {
- return generateObjectWithCID(t, cidtest.ID())
-}
-
-func generateObjectWithCID(t testing.TB, cnr cid.ID) *object.Object {
- var ver version.Version
- ver.SetMajor(2)
- ver.SetMinor(1)
-
- csum := checksumtest.Checksum()
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- obj := object.New()
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cnr)
- obj.SetVersion(&ver)
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
- obj.SetPayload([]byte{1, 2, 3, 4, 5})
-
- return obj
-}
-
-func addAttribute(obj *object.Object, key, val string) {
- var attr object.Attribute
- attr.SetKey(key)
- attr.SetValue(val)
-
- attrs := obj.Attributes()
- attrs = append(attrs, attr)
- obj.SetAttributes(attrs...)
-}
-
func checkExpiredObjects(t *testing.T, db *meta.DB, f func(exp, nonExp *objectSDK.Object)) {
- expObj := generateObject(t)
+ expObj := testutil.GenerateObject()
setExpiration(expObj, currEpoch-1)
require.NoError(t, metaPut(db, expObj, nil))
- nonExpObj := generateObject(t)
+ nonExpObj := testutil.GenerateObject()
setExpiration(nonExpObj, currEpoch)
require.NoError(t, metaPut(db, nonExpObj, nil))
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 79f870372e..9a5a6e5746 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -2,17 +2,26 @@ package meta
import (
"bytes"
+ "context"
"errors"
"fmt"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
+var errFailedToRemoveUniqueIndexes = errors.New("can't remove unique indexes")
+
// DeletePrm groups the parameters of Delete operation.
type DeletePrm struct {
addrs []oid.Address
@@ -20,31 +29,42 @@ type DeletePrm struct {
// DeleteRes groups the resulting values of Delete operation.
type DeleteRes struct {
- rawRemoved uint64
- availableRemoved uint64
- sizes []uint64
- availableSizes []uint64
+ phyCount uint64
+ logicCount uint64
+ userCount uint64
+ phySize uint64
+ logicSize uint64
+ removedByCnrID map[cid.ID]ObjectCounters
}
-// AvailableObjectsRemoved returns the number of removed available
+// LogicCount returns the number of removed logic
// objects.
-func (d DeleteRes) AvailableObjectsRemoved() uint64 {
- return d.availableRemoved
+func (d DeleteRes) LogicCount() uint64 {
+ return d.logicCount
}
-// RawObjectsRemoved returns the number of removed raw objects.
-func (d DeleteRes) RawObjectsRemoved() uint64 {
- return d.rawRemoved
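+// UserCount returns the number of removed user objects.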
+func (d DeleteRes) UserCount() uint64 {
+ return d.userCount
}
-// RemovedPhysicalObjectSizes returns the sizes of removed physical objects.
-func (d DeleteRes) RemovedPhysicalObjectSizes() []uint64 {
- return d.sizes
+// RemovedByCnrID returns the removed object counters grouped by container ID.
+func (d DeleteRes) RemovedByCnrID() map[cid.ID]ObjectCounters {
+ return d.removedByCnrID
}
-// RemovedLogicalObjectSizes returns the sizes of removed logical objects.
-func (d DeleteRes) RemovedLogicalObjectSizes() []uint64 {
- return d.availableSizes
+// PhyCount returns the number of removed physical objects.
+func (d DeleteRes) PhyCount() uint64 {
+ return d.phyCount
+}
+
+// PhySize returns the size of removed physical objects.
+func (d DeleteRes) PhySize() uint64 {
+ return d.phySize
+}
+
+// LogicSize returns the size of removed logical objects.
+func (d DeleteRes) LogicSize() uint64 {
+ return d.logicSize
}
// SetAddresses is a Delete option to set the addresses of the objects to delete.
@@ -57,15 +77,27 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
type referenceNumber struct {
all, cur int
- addr oid.Address
-
obj *objectSDK.Object
}
type referenceCounter map[string]*referenceNumber
// Delete removed object records from metabase indexes.
-func (db *DB) Delete(prm DeletePrm) (DeleteRes, error) {
+func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ var (
+ startedAt = time.Now()
+ deleted = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Delete", time.Since(startedAt), deleted)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Delete",
+ trace.WithAttributes(
+ attribute.Int("addr_count", len(prm.addrs)),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -75,86 +107,134 @@ func (db *DB) Delete(prm DeletePrm) (DeleteRes, error) {
return DeleteRes{}, ErrReadOnlyMode
}
- var rawRemoved uint64
- var availableRemoved uint64
var err error
- var sizes = make([]uint64, len(prm.addrs))
- var availableSizes = make([]uint64, len(prm.addrs))
+ var res DeleteRes
- err = db.boltDB.Update(func(tx *bbolt.Tx) error {
- // We need to clear slice because tx can try to execute multiple times.
- rawRemoved, availableRemoved, err = db.deleteGroup(tx, prm.addrs, sizes, availableSizes)
+ err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ res, err = db.deleteGroup(tx, prm.addrs)
return err
})
if err == nil {
+ deleted = true
for i := range prm.addrs {
- storagelog.Write(db.log,
+ storagelog.Write(ctx, db.log,
storagelog.AddressField(prm.addrs[i]),
storagelog.OpField("metabase DELETE"))
}
}
- return DeleteRes{
- rawRemoved: rawRemoved,
- availableRemoved: availableRemoved,
- sizes: sizes,
- availableSizes: availableSizes,
- }, err
+ return res, metaerr.Wrap(err)
}
// deleteGroup deletes object from the metabase. Handles removal of the
// references of the split objects.
-// The first return value is a physical objects removed number: physical
-// objects that were stored. The second return value is a logical objects
-// removed number: objects that were available (without Tombstones, GCMarks
-// non-expired, etc.)
-func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address, sizes []uint64, availableSizes []uint64) (uint64, uint64, error) {
+func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error) {
+ res := DeleteRes{
+ removedByCnrID: make(map[cid.ID]ObjectCounters),
+ }
refCounter := make(referenceCounter, len(addrs))
currEpoch := db.epochState.CurrentEpoch()
- var rawDeleted uint64
- var availableDeleted uint64
-
for i := range addrs {
- removed, available, size, err := db.delete(tx, addrs[i], refCounter, currEpoch)
+ r, err := db.delete(tx, addrs[i], refCounter, currEpoch)
if err != nil {
- return 0, 0, err // maybe log and continue?
+ return DeleteRes{}, err
}
- if removed {
- rawDeleted++
- sizes[i] = size
- }
-
- if available {
- availableDeleted++
- availableSizes[i] = size
- }
+ applyDeleteSingleResult(r, &res, addrs, i)
}
- if rawDeleted > 0 {
- err := db.updateCounter(tx, phy, rawDeleted, false)
- if err != nil {
- return 0, 0, fmt.Errorf("could not decrease phy object counter: %w", err)
- }
- }
-
- if availableDeleted > 0 {
- err := db.updateCounter(tx, logical, availableDeleted, false)
- if err != nil {
- return 0, 0, fmt.Errorf("could not decrease logical object counter: %w", err)
- }
+ if err := db.updateCountersDelete(tx, res); err != nil {
+ return DeleteRes{}, err
}
for _, refNum := range refCounter {
if refNum.cur == refNum.all {
err := db.deleteObject(tx, refNum.obj, true)
if err != nil {
- return rawDeleted, availableDeleted, err // maybe log and continue?
+ return DeleteRes{}, err
}
}
}
- return rawDeleted, availableDeleted, nil
+ return res, nil
+}
+
+func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
+ if res.phyCount > 0 {
+ err := db.decShardObjectCounter(tx, phy, res.phyCount)
+ if err != nil {
+ return fmt.Errorf("decrease phy object counter: %w", err)
+ }
+ }
+
+ if res.logicCount > 0 {
+ err := db.decShardObjectCounter(tx, logical, res.logicCount)
+ if err != nil {
+ return fmt.Errorf("decrease logical object counter: %w", err)
+ }
+ }
+
+ if res.userCount > 0 {
+ err := db.decShardObjectCounter(tx, user, res.userCount)
+ if err != nil {
+ return fmt.Errorf("decrease user object counter: %w", err)
+ }
+ }
+
+ if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
+ return fmt.Errorf("decrease container object counter: %w", err)
+ }
+ return nil
+}
+
+func applyDeleteSingleResult(r deleteSingleResult, res *DeleteRes, addrs []oid.Address, i int) {
+ if r.Phy {
+ if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
+ v.Phy++
+ res.removedByCnrID[addrs[i].Container()] = v
+ } else {
+ res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
+ Phy: 1,
+ }
+ }
+
+ res.phyCount++
+ res.phySize += r.Size
+ }
+
+ if r.Logic {
+ if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
+ v.Logic++
+ res.removedByCnrID[addrs[i].Container()] = v
+ } else {
+ res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
+ Logic: 1,
+ }
+ }
+
+ res.logicCount++
+ res.logicSize += r.Size
+ }
+
+ if r.User {
+ if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
+ v.User++
+ res.removedByCnrID[addrs[i].Container()] = v
+ } else {
+ res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
+ User: 1,
+ }
+ }
+
+ res.userCount++
+ }
+}
+
+type deleteSingleResult struct {
+ Phy bool
+ Logic bool
+ User bool
+ Size uint64
}
// delete removes object indexes from the metabase. Counts the references
@@ -162,8 +242,8 @@ func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address, sizes []uint64, ava
// The first return value indicates if an object has been removed. (removing a
// non-exist object is error-free). The second return value indicates if an
// object was available before the removal (for calculating the logical object
-// counter). The third return value is removed object payload size.
-func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter, currEpoch uint64) (bool, bool, uint64, error) {
+// counter). The third return value is the removed object payload size.
+func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter, currEpoch uint64) (deleteSingleResult, error) {
key := make([]byte, addressKeySize)
addrKey := addressKey(addr, key)
garbageBKT := tx.Bucket(garbageBucketName)
@@ -171,27 +251,39 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
removeAvailableObject := inGraveyardWithKey(addrKey, graveyardBKT, garbageBKT) == 0
+ // unmarshal object, work only with physically stored (raw == true) objects
+ obj, err := db.get(tx, addr, key, false, true, currEpoch)
+ if err != nil {
+ if client.IsErrObjectNotFound(err) {
+ addrKey = addressKey(addr, key)
+ if garbageBKT != nil {
+ err := garbageBKT.Delete(addrKey)
+ if err != nil {
+ return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
+ }
+ }
+ return deleteSingleResult{}, nil
+ }
+ var siErr *objectSDK.SplitInfoError
+ var ecErr *objectSDK.ECInfoError
+ if errors.As(err, &siErr) || errors.As(err, &ecErr) {
+			// if the object is virtual (a parent), do nothing: it will be deleted with its last child
+			// if the object is erasure-coded, it will be deleted with the last chunk present on the shard
+ return deleteSingleResult{}, nil
+ }
+
+ return deleteSingleResult{}, err
+ }
+
+ addrKey = addressKey(addr, key)
// remove record from the garbage bucket
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
- return false, false, 0, fmt.Errorf("could not remove from garbage bucket: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
}
}
- // unmarshal object, work only with physically stored (raw == true) objects
- obj, err := db.get(tx, addr, key, false, true, currEpoch)
- if err != nil {
- var siErr *objectSDK.SplitInfoError
- var notFoundErr apistatus.ObjectNotFound
-
- if errors.As(err, ¬FoundErr) || errors.As(err, &siErr) {
- return false, false, 0, nil
- }
-
- return false, false, 0, err
- }
-
// if object is an only link to a parent, then remove parent
if parent := obj.Parent(); parent != nil {
parAddr := object.AddressOf(parent)
@@ -201,9 +293,8 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef, ok := refCounter[k]
if !ok {
nRef = &referenceNumber{
- all: parentLength(tx, parAddr),
- addr: parAddr,
- obj: parent,
+ all: parentLength(tx, parAddr),
+ obj: parent,
}
refCounter[k] = nRef
@@ -212,13 +303,24 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef.cur++
}
+ isUserObject := IsUserObject(obj)
+
// remove object
err = db.deleteObject(tx, obj, false)
if err != nil {
- return false, false, 0, fmt.Errorf("could not remove object: %w", err)
+ return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
}
- return true, removeAvailableObject, obj.PayloadSize(), nil
+ if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {
+ return deleteSingleResult{}, err
+ }
+
+ return deleteSingleResult{
+ Phy: true,
+ Logic: removeAvailableObject,
+ User: isUserObject && removeAvailableObject,
+ Size: obj.PayloadSize(),
+ }, nil
}
func (db *DB) deleteObject(
@@ -228,17 +330,30 @@ func (db *DB) deleteObject(
) error {
err := delUniqueIndexes(tx, obj, isParent)
if err != nil {
- return fmt.Errorf("can't remove unique indexes")
+ return errFailedToRemoveUniqueIndexes
}
err = updateListIndexes(tx, obj, delListIndexItem)
if err != nil {
- return fmt.Errorf("can't remove list indexes: %w", err)
+ return fmt.Errorf("remove list indexes: %w", err)
}
err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
if err != nil {
- return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
+ return fmt.Errorf("remove fake bucket tree indexes: %w", err)
+ }
+
+ if isParent {
+		// remove the record from the garbage bucket, because regular object deletion does nothing for virtual objects
+ garbageBKT := tx.Bucket(garbageBucketName)
+ if garbageBKT != nil {
+ key := make([]byte, addressKeySize)
+ addrKey := addressKey(object.AddressOf(obj), key)
+ err := garbageBKT.Delete(addrKey)
+ if err != nil {
+ return fmt.Errorf("remove from garbage bucket: %w", err)
+ }
+ }
}
return nil
@@ -248,12 +363,12 @@ func (db *DB) deleteObject(
func parentLength(tx *bbolt.Tx, addr oid.Address) int {
bucketName := make([]byte, bucketKeySize)
- bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:]))
+ bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName))
if bkt == nil {
return 0
}
- lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:])))
+ lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName)))
if err != nil {
return 0
}
@@ -261,25 +376,11 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int {
return len(lst)
}
-func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) {
+func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt := tx.Bucket(item.name)
if bkt != nil {
- _ = bkt.Delete(item.key) // ignore error, best effort there
+ return bkt.Delete(item.key)
}
-}
-
-func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt := tx.Bucket(item.name)
- if bkt == nil {
- return nil
- }
-
- fkbtRoot := bkt.Bucket(item.key)
- if fkbtRoot == nil {
- return nil
- }
-
- _ = fkbtRoot.Delete(item.val) // ignore error, best effort there
return nil
}
@@ -305,26 +406,62 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
// if list empty, remove the key from bucket
if len(lst) == 0 {
- _ = bkt.Delete(item.key) // ignore error, best effort there
-
- return nil
+ return bkt.Delete(item.key)
}
// if list is not empty, then update it
encodedLst, err := encodeList(lst)
if err != nil {
- return nil // ignore error, best effort there
+ return err
}
- _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there
- return nil
+ return bkt.Put(item.key, encodedLst)
+}
+
+func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt := tx.Bucket(item.name)
+ if bkt == nil {
+ return nil
+ }
+
+ fkbtRoot := bkt.Bucket(item.key)
+ if fkbtRoot == nil {
+ return nil
+ }
+
+ if err := fkbtRoot.Delete(item.val); err != nil {
+ return err
+ }
+
+ if hasAnyItem(fkbtRoot) {
+ return nil
+ }
+
+ if err := bkt.DeleteBucket(item.key); err != nil {
+ return err
+ }
+
+ if hasAnyItem(bkt) {
+ return nil
+ }
+
+ return tx.DeleteBucket(item.name)
+}
+
+func hasAnyItem(b *bbolt.Bucket) bool {
+	k, _ := b.Cursor().First()
+	return k != nil
}
func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error {
addr := object.AddressOf(obj)
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
- addrKey := addressKey(addr, make([]byte, addressKeySize))
cnr := addr.Container()
bucketName := make([]byte, bucketKeySize)
@@ -335,37 +472,126 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
bucketName = primaryBucketName(cnr, bucketName)
case objectSDK.TypeTombstone:
bucketName = tombstoneBucketName(cnr, bucketName)
- case objectSDK.TypeStorageGroup:
- bucketName = storageGroupBucketName(cnr, bucketName)
case objectSDK.TypeLock:
bucketName = bucketNameLockers(cnr, bucketName)
default:
return ErrUnknownObjectType
}
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
} else {
- delUniqueIndexItem(tx, namedBucketItem{
+ if err := delUniqueIndexItem(tx, namedBucketItem{
name: parentBucketName(cnr, bucketName),
key: objKey,
- })
+ }); err != nil {
+ return err
+ }
}
- delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
+ if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
name: smallBucketName(cnr, bucketName),
key: objKey,
- })
- delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
+ }); err != nil {
+ return err
+ }
+ if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
name: rootBucketName(cnr, bucketName),
key: objKey,
- })
- delUniqueIndexItem(tx, namedBucketItem{ // remove from ToMoveIt index
- name: toMoveItBucketName,
- key: addrKey,
- })
+ }); err != nil {
+ return err
+ }
+
+ if expEpoch, ok := hasExpirationEpoch(obj); ok {
+ if err := delUniqueIndexItem(tx, namedBucketItem{
+ name: expEpochToObjectBucketName,
+ key: expirationEpochKey(expEpoch, cnr, addr.Object()),
+ }); err != nil {
+ return err
+ }
+ if err := delUniqueIndexItem(tx, namedBucketItem{
+ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
+ key: objKey,
+ }); err != nil {
+ return err
+ }
+ }
return nil
}
+
+func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.Object, cnr cid.ID, refCounter referenceCounter) error {
+ ech := obj.ECHeader()
+ if ech == nil {
+ return nil
+ }
+
+ hasAnyChunks := hasAnyECChunks(tx, ech, cnr)
+ // drop EC parent GC mark if current EC chunk is the last one
+ if !hasAnyChunks && garbageBKT != nil {
+ var ecParentAddress oid.Address
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ech.Parent())
+ addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
+ err := garbageBKT.Delete(addrKey)
+ if err != nil {
+ return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+ }
+ }
+
+ // also drop EC parent root info if current EC chunk is the last one
+ if !hasAnyChunks {
+ if err := delUniqueIndexItem(tx, namedBucketItem{
+ name: rootBucketName(cnr, make([]byte, bucketKeySize)),
+ key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
+ }); err != nil {
+ return err
+ }
+ }
+
+ if ech.ParentSplitParentID() == nil {
+ return nil
+ }
+
+ var splitParentAddress oid.Address
+ splitParentAddress.SetContainer(cnr)
+ splitParentAddress.SetObject(*ech.ParentSplitParentID())
+
+ if ref, ok := refCounter[string(addressKey(splitParentAddress, make([]byte, addressKeySize)))]; ok {
+		// the linking object is already being processed,
+		// so just record that one more reference was deleted;
+		// split info and GC marks will be removed when the linking object is deleted
+ ref.cur++
+ return nil
+ }
+
+ if parentLength(tx, splitParentAddress) > 0 {
+		// the linking object still exists, so leave split info and GC mark deletion to the linking object processing
+ return nil
+ }
+
+ // drop split parent gc mark
+ if garbageBKT != nil {
+ addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
+ err := garbageBKT.Delete(addrKey)
+ if err != nil {
+ return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+ }
+ }
+
+ // drop split info
+ return delUniqueIndexItem(tx, namedBucketItem{
+ name: rootBucketName(cnr, make([]byte, bucketKeySize)),
+ key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)),
+ })
+}
+
+func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool {
+ data := getFromBucket(tx, ecInfoBucketName(cnr, make([]byte, bucketKeySize)),
+ objectKey(ech.Parent(), make([]byte, objectKeySize)))
+ return len(data) > 0
+}
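
Taken together, Delete now runs in a single bbolt batch, returns aggregate counters instead of per-address slices, and reports a per-container breakdown used to maintain the container counters. A minimal caller sketch under those assumptions (helper name and output format are illustrative only):

package main

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// deleteAndReport removes the given addresses and prints the counters
// exposed by the reworked DeleteRes.
func deleteAndReport(ctx context.Context, db *meta.DB, addrs []oid.Address) error {
	var prm meta.DeletePrm
	prm.SetAddresses(addrs...)

	res, err := db.Delete(ctx, prm)
	if err != nil {
		return err
	}
	fmt.Printf("phy=%d logic=%d user=%d phySize=%d logicSize=%d\n",
		res.PhyCount(), res.LogicCount(), res.UserCount(), res.PhySize(), res.LogicSize())
	for cnr, oc := range res.RemovedByCnrID() {
		fmt.Printf("container %s: -%d phy, -%d logic, -%d user\n",
			cnr.EncodeToString(), oc.Phy, oc.Logic, oc.User)
	}
	return nil
}
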
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
new file mode 100644
index 0000000000..884da23ff5
--- /dev/null
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -0,0 +1,443 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "path/filepath"
+ "slices"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+ "go.etcd.io/bbolt"
+)
+
+func TestDeleteECObject_WithoutSplit(t *testing.T) {
+ t.Parallel()
+
+ db := New(
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{uint64(12)}),
+ )
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ ecChunk := oidtest.ID()
+ ecParent := oidtest.ID()
+ tombstoneID := oidtest.ID()
+
+ chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetID(ecChunk)
+ chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObj.SetPayloadSize(uint64(10))
+ chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
+
+ // put object with EC
+
+ var prm PutPrm
+ prm.SetObject(chunkObj)
+ prm.SetStorageID([]byte("0/0"))
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ var ecChunkAddress oid.Address
+ ecChunkAddress.SetContainer(cnr)
+ ecChunkAddress.SetObject(ecChunk)
+
+ var ecParentAddress oid.Address
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ecParent)
+
+ var getPrm GetPrm
+
+ getPrm.SetAddress(ecChunkAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.NoError(t, err)
+
+ var ecInfoError *objectSDK.ECInfoError
+ getPrm.SetAddress(ecParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, &ecInfoError)
+	require.Len(t, ecInfoError.ECInfo().Chunks, 1)
+	require.EqualValues(t, 0, ecInfoError.ECInfo().Chunks[0].Index)
+	require.EqualValues(t, 3, ecInfoError.ECInfo().Chunks[0].Total)
+
+ // inhume EC parent (like Delete does)
+
+ var inhumePrm InhumePrm
+ var tombAddress oid.Address
+ tombAddress.SetContainer(cnr)
+ tombAddress.SetObject(tombstoneID)
+ inhumePrm.SetAddresses(ecParentAddress)
+ inhumePrm.SetTombstoneAddress(tombAddress)
+ _, err = db.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+
+ getPrm.SetAddress(ecParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+
+ getPrm.SetAddress(ecChunkAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+
+	// GC finds and deletes the EC parent and the EC chunk
+
+ var garbageAddresses []oid.Address
+ var itPrm GarbageIterationPrm
+ itPrm.SetHandler(func(g GarbageObject) error {
+ garbageAddresses = append(garbageAddresses, g.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, 2, len(garbageAddresses))
+ require.True(t, slices.Contains(garbageAddresses, ecParentAddress))
+ require.True(t, slices.Contains(garbageAddresses, ecChunkAddress))
+
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(garbageAddresses...)
+ _, err = db.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
+
+ garbageAddresses = nil
+ itPrm.SetHandler(func(g GarbageObject) error {
+ garbageAddresses = append(garbageAddresses, g.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, 0, len(garbageAddresses))
+
+	// after the tombstone expires, GC inhumes the tombstone and drops the graves
+
+ var tombstonedObjects []TombstonedObject
+ var graveyardIterationPrm GraveyardIterationPrm
+ graveyardIterationPrm.SetHandler(func(object TombstonedObject) error {
+ tombstonedObjects = append(tombstonedObjects, object)
+ return nil
+ })
+ require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
+ require.Equal(t, 2, len(tombstonedObjects))
+
+ _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
+ require.NoError(t, err)
+
+ // GC finds tombstone as garbage and deletes it
+
+ garbageAddresses = nil
+ itPrm.SetHandler(func(g GarbageObject) error {
+ garbageAddresses = append(garbageAddresses, g.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, 1, len(garbageAddresses))
+ require.Equal(t, tombstoneID, garbageAddresses[0].Object())
+
+ deletePrm.SetAddresses(garbageAddresses...)
+ _, err = db.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
+
+	// no more objects should be left as garbage
+
+	itPrm.SetHandler(func(g GarbageObject) error {
+		require.FailNow(t, "no garbage objects should be left")
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+
+ require.NoError(t, db.boltDB.View(testVerifyNoObjectDataLeft))
+
+ require.NoError(t, testCountersAreZero(db, cnr))
+}
+
+func TestDeleteECObject_WithSplit(t *testing.T) {
+ t.Parallel()
+ for _, c := range []int{1, 2, 3} {
+ for _, l := range []bool{true, false} {
+ test := fmt.Sprintf("%d EC chunks with split info without linking object", c)
+ if l {
+ test = fmt.Sprintf("%d EC chunks with split info with linking object", c)
+ }
+ t.Run(test, func(t *testing.T) {
+ testDeleteECObjectWithSplit(t, c, l)
+ })
+ }
+ }
+}
+
+func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool) {
+ t.Parallel()
+
+ db := New(
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{uint64(12)}),
+ )
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ ecChunks := make([]oid.ID, chunksCount)
+ for idx := range ecChunks {
+ ecChunks[idx] = oidtest.ID()
+ }
+ ecParentID := oidtest.ID()
+ splitParentID := oidtest.ID()
+ tombstoneID := oidtest.ID()
+ splitID := objectSDK.NewSplitID()
+ linkingID := oidtest.ID()
+
+ ecChunkObjects := make([]*objectSDK.Object, chunksCount)
+ for idx := range ecChunkObjects {
+ ecChunkObjects[idx] = testutil.GenerateObjectWithCID(cnr)
+ ecChunkObjects[idx].SetContainerID(cnr)
+ ecChunkObjects[idx].SetID(ecChunks[idx])
+ ecChunkObjects[idx].SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ ecChunkObjects[idx].SetPayloadSize(uint64(10))
+ ecChunkObjects[idx].SetECHeader(objectSDK.NewECHeader(
+ objectSDK.ECParentInfo{
+ ID: ecParentID,
+ SplitParentID: &splitParentID, SplitID: splitID,
+ }, uint32(idx), uint32(chunksCount+1), []byte{}, 0))
+ }
+
+ splitParentObj := testutil.GenerateObjectWithCID(cnr)
+ splitParentObj.SetID(splitParentID)
+
+ var linkingAddress oid.Address
+ linkingAddress.SetContainer(cnr)
+ linkingAddress.SetObject(linkingID)
+
+ linkingObj := testutil.GenerateObjectWithCID(cnr)
+ linkingObj.SetID(linkingID)
+ linkingObj.SetParent(splitParentObj)
+ linkingObj.SetParentID(splitParentID)
+ linkingObj.SetChildren(ecParentID, oidtest.ID(), oidtest.ID())
+ linkingObj.SetSplitID(splitID)
+
+ // put object with EC and split info
+
+ var prm PutPrm
+ prm.SetStorageID([]byte("0/0"))
+ for _, obj := range ecChunkObjects {
+ prm.SetObject(obj)
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+ }
+
+ if withLinking {
+ prm.SetObject(linkingObj)
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+ }
+
+ var ecParentAddress oid.Address
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ecParentID)
+
+ var getPrm GetPrm
+ var ecInfoError *objectSDK.ECInfoError
+ getPrm.SetAddress(ecParentAddress)
+ _, err := db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, &ecInfoError)
+	require.Len(t, ecInfoError.ECInfo().Chunks, chunksCount)
+
+ var splitParentAddress oid.Address
+ splitParentAddress.SetContainer(cnr)
+ splitParentAddress.SetObject(splitParentID)
+
+ var splitInfoError *objectSDK.SplitInfoError
+ getPrm.SetAddress(splitParentAddress)
+ getPrm.SetRaw(true)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, &splitInfoError)
+	require.NotNil(t, splitInfoError.SplitInfo())
+ require.Equal(t, splitID, splitInfoError.SplitInfo().SplitID())
+ lastPart, set := splitInfoError.SplitInfo().LastPart()
+ require.True(t, set)
+ require.Equal(t, lastPart, ecParentID)
+ if withLinking {
+ l, ok := splitInfoError.SplitInfo().Link()
+ require.True(t, ok)
+ require.Equal(t, linkingID, l)
+ }
+ getPrm.SetRaw(false)
+
+ // inhume EC parent and split objects (like Delete does)
+
+ inhumeAddresses := []oid.Address{splitParentAddress, ecParentAddress}
+ if withLinking {
+ inhumeAddresses = append(inhumeAddresses, linkingAddress)
+ }
+
+ var inhumePrm InhumePrm
+ var tombAddress oid.Address
+ tombAddress.SetContainer(cnr)
+ tombAddress.SetObject(tombstoneID)
+ inhumePrm.SetAddresses(inhumeAddresses...)
+ inhumePrm.SetTombstoneAddress(tombAddress)
+ _, err = db.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+
+ getPrm.SetAddress(ecParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+
+ getPrm.SetAddress(splitParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+
+ if withLinking {
+ getPrm.SetAddress(linkingAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+ }
+
+ for _, id := range ecChunks {
+ var ecChunkAddress oid.Address
+ ecChunkAddress.SetContainer(cnr)
+ ecChunkAddress.SetObject(id)
+ getPrm.SetAddress(ecChunkAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+ }
+
+	// GC finds and deletes the split parent, the EC parent, the linking object (if any) and the EC chunks
+
+ parentCount := 2 // split + ec
+ if withLinking {
+ parentCount = 3
+ }
+
+ var garbageAddresses []oid.Address
+ var itPrm GarbageIterationPrm
+ itPrm.SetHandler(func(g GarbageObject) error {
+ garbageAddresses = append(garbageAddresses, g.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, parentCount+chunksCount, len(garbageAddresses))
+ require.True(t, slices.Contains(garbageAddresses, splitParentAddress))
+ require.True(t, slices.Contains(garbageAddresses, ecParentAddress))
+ if withLinking {
+ require.True(t, slices.Contains(garbageAddresses, linkingAddress))
+ }
+ for _, id := range ecChunks {
+ var ecChunkAddress oid.Address
+ ecChunkAddress.SetContainer(cnr)
+ ecChunkAddress.SetObject(id)
+ require.True(t, slices.Contains(garbageAddresses, ecChunkAddress))
+ }
+
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(garbageAddresses...)
+ _, err = db.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
+
+ var garbageStub []oid.Address
+ itPrm.SetHandler(func(g GarbageObject) error {
+ garbageStub = append(garbageStub, g.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, 0, len(garbageStub))
+
+	// after the tombstone expires, GC inhumes the tombstone and drops the graves
+
+ var tombstonedObjects []TombstonedObject
+ var graveyardIterationPrm GraveyardIterationPrm
+ graveyardIterationPrm.SetHandler(func(object TombstonedObject) error {
+ tombstonedObjects = append(tombstonedObjects, object)
+ return nil
+ })
+ require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
+	require.Len(t, tombstonedObjects, parentCount+chunksCount)
+
+ _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
+ require.NoError(t, err)
+
+ // GC finds tombstone as garbage and deletes it
+
+ garbageAddresses = nil
+ itPrm.SetHandler(func(g GarbageObject) error {
+ garbageAddresses = append(garbageAddresses, g.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, 1, len(garbageAddresses))
+ require.Equal(t, tombstoneID, garbageAddresses[0].Object())
+
+ deletePrm.SetAddresses(garbageAddresses...)
+ _, err = db.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
+
+	// no more objects should be left as garbage
+
+	itPrm.SetHandler(func(g GarbageObject) error {
+		require.FailNow(t, "no garbage objects should be left")
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+
+ require.NoError(t, db.boltDB.View(testVerifyNoObjectDataLeft))
+
+ require.NoError(t, testCountersAreZero(db, cnr))
+}
+
+func testVerifyNoObjectDataLeft(tx *bbolt.Tx) error {
+ return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
+ if bytes.Equal(name, shardInfoBucket) ||
+ bytes.Equal(name, containerCounterBucketName) ||
+ bytes.Equal(name, containerVolumeBucketName) ||
+ bytes.Equal(name, expEpochToObjectBucketName) {
+ return nil
+ }
+ return testBucketEmpty(name, b)
+ })
+}
+
+func testBucketEmpty(name []byte, b *bbolt.Bucket) error {
+ err := b.ForEach(func(k, v []byte) error {
+ if len(v) > 0 {
+ return fmt.Errorf("bucket %v is not empty", name)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ return b.ForEachBucket(func(k []byte) error {
+ return testBucketEmpty(k, b.Bucket(k))
+ })
+}
+
+func testCountersAreZero(db *DB, cnr cid.ID) error {
+ c, err := db.ContainerCount(context.Background(), cnr)
+ if err != nil {
+ return err
+ }
+ if !c.IsZero() {
+ return fmt.Errorf("container %s has non zero counters", cnr.EncodeToString())
+ }
+ s, err := db.ContainerSize(cnr)
+ if err != nil {
+ return err
+ }
+ if s != 0 {
+ return fmt.Errorf("container %s has non zero size", cnr.EncodeToString())
+ }
+ return nil
+}
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
new file mode 100644
index 0000000000..0329e3a73a
--- /dev/null
+++ b/pkg/local_object_storage/metabase/delete_meta_test.go
@@ -0,0 +1,85 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "path/filepath"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/stretchr/testify/require"
+ "go.etcd.io/bbolt"
+)
+
+func TestPutDeleteIndexAttributes(t *testing.T) {
+ db := New([]Option{
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{}),
+ }...)
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object")
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj1)
+
+ _, err := db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object")
+
+ putPrm.SetObject(obj2)
+ putPrm.SetIndexAttributes(true)
+
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize))
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("CRDT-Name"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("/path/to/object"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ return nil
+ }))
+
+ var dPrm DeletePrm
+ dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2))
+ _, err = db.Delete(context.Background(), dPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+}
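
The test above pins down the attribute index layout: one bucket per (container, attribute) pair, one nested bucket per value, with object keys mapped to zeroValue. A hypothetical package-internal helper condensing the lookups the test performs by hand (all identifiers as used in the test):

package meta

import (
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.etcd.io/bbolt"
)

// isAttributeIndexed reports whether the given (attribute, value) pair of
// an object is present in the attribute index buckets.
func isAttributeIndexed(db *DB, cnr cid.ID, attr, value string, obj oid.ID) (bool, error) {
	var found bool
	err := db.boltDB.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket(attributeBucketName(cnr, attr, make([]byte, bucketKeySize)))
		if b == nil {
			return nil
		}
		vb := b.Bucket([]byte(value))
		if vb == nil {
			return nil
		}
		found = vb.Get(objectKey(obj, make([]byte, objectKeySize))) != nil
		return nil
	})
	return found, err
}
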
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index 3cd314dc2f..c0762a377c 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -1,12 +1,14 @@
package meta_test
import (
+ "context"
"errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -16,12 +18,13 @@ import (
func TestDB_Delete(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
- parent := generateObjectWithCID(t, cnr)
- addAttribute(parent, "foo", "bar")
+ parent := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(parent, "foo", "bar")
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
@@ -30,60 +33,52 @@ func TestDB_Delete(t *testing.T) {
err := putBig(db, child)
require.NoError(t, err)
- // fill ToMoveIt index
- err = metaToMoveIt(db, object.AddressOf(child))
- require.NoError(t, err)
-
- // check if Movable list is not empty
- l, err := metaMovable(db)
- require.NoError(t, err)
- require.Len(t, l, 1)
-
// try to remove parent, should be no-op, error-free
err = metaDelete(db, object.AddressOf(parent))
require.NoError(t, err)
// inhume parent and child so they will be on graveyard
- ts := generateObjectWithCID(t, cnr)
+ ts := testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object())
+ require.NoError(t, err)
+
+ ts = testutil.GenerateObjectWithCID(cnr)
+
+ err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object())
require.NoError(t, err)
// delete object
err = metaDelete(db, object.AddressOf(child))
require.NoError(t, err)
- // check if there is no data in Movable index
- l, err = metaMovable(db)
- require.NoError(t, err)
- require.Len(t, l, 0)
-
// check if they marked as already removed
ok, err := metaExists(db, object.AddressOf(child))
- require.Error(t, apistatus.ObjectAlreadyRemoved{})
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
require.False(t, ok)
ok, err = metaExists(db, object.AddressOf(parent))
- require.Error(t, apistatus.ObjectAlreadyRemoved{})
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
require.False(t, ok)
}
func TestDeleteAllChildren(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
// generate parent object
- parent := generateObjectWithCID(t, cnr)
+ parent := testutil.GenerateObjectWithCID(cnr)
// generate 2 children
- child1 := generateObjectWithCID(t, cnr)
+ child1 := testutil.GenerateObjectWithCID(cnr)
child1.SetParent(parent)
idParent, _ := parent.ID()
child1.SetParentID(idParent)
- child2 := generateObjectWithCID(t, cnr)
+ child2 := testutil.GenerateObjectWithCID(cnr)
child2.SetParent(parent)
child2.SetParentID(idParent)
@@ -108,11 +103,12 @@ func TestDeleteAllChildren(t *testing.T) {
func TestGraveOnlyDelete(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
addr := oidtest.Address()
// inhume non-existent object by address
- require.NoError(t, metaInhume(db, addr, oidtest.Address()))
+ require.NoError(t, metaInhume(db, addr, oidtest.ID()))
// delete the object data
require.NoError(t, metaDelete(db, addr))
@@ -120,6 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) {
func TestExpiredObject(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
// removing expired object should be error-free
@@ -129,10 +126,83 @@ func TestExpiredObject(t *testing.T) {
})
}
+func TestDelete(t *testing.T) {
+ db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ for range 10 {
+ obj := testutil.GenerateObjectWithCID(cnr)
+
+ var prm meta.PutPrm
+ prm.SetObject(obj)
+ prm.SetStorageID([]byte("0/0"))
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ var inhumePrm meta.InhumePrm
+ inhumePrm.SetAddresses(object.AddressOf(obj))
+ _, err = db.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+ }
+
+ var addrs []oid.Address
+ var iprm meta.GarbageIterationPrm
+ iprm.SetHandler(func(o meta.GarbageObject) error {
+ addrs = append(addrs, o.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), iprm))
+ require.Equal(t, 10, len(addrs))
+ var deletePrm meta.DeletePrm
+ deletePrm.SetAddresses(addrs...)
+ _, err := db.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
+
+ addrs = nil
+ iprm.SetHandler(func(o meta.GarbageObject) error {
+ addrs = append(addrs, o.Address())
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), iprm))
+ require.Equal(t, 0, len(addrs))
+}
+
+func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) {
+ db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ addr := oidtest.Address()
+
+ var prm meta.InhumePrm
+ prm.SetAddresses(addr)
+ prm.SetGCMark()
+ _, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+
+ var garbageCount int
+ var itPrm meta.GarbageIterationPrm
+ itPrm.SetHandler(func(g meta.GarbageObject) error {
+ garbageCount++
+ return nil
+ })
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, 1, garbageCount)
+
+ var delPrm meta.DeletePrm
+ delPrm.SetAddresses(addr)
+ _, err = db.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
+
+ garbageCount = 0
+ require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
+ require.Equal(t, 0, garbageCount)
+}
+
func metaDelete(db *meta.DB, addrs ...oid.Address) error {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(addrs...)
- _, err := db.Delete(deletePrm)
+ _, err := db.Delete(context.Background(), deletePrm)
return err
}
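
Note the changed `metaInhume` convention above: the tests now pass only the tombstone's object ID, and the tombstone address is derived in the target's container (a constraint that `InhumePrm.validate` enforces later in this diff). A minimal sketch of that calling convention; the helper name is hypothetical:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// inhumeWithTombstone derives the tombstone address from the target's
// container, then inhumes the target with it (hypothetical helper).
func inhumeWithTombstone(ctx context.Context, db *meta.DB, target oid.Address, tombID oid.ID) error {
	var tombAddr oid.Address
	tombAddr.SetContainer(target.Container())
	tombAddr.SetObject(tombID)

	var prm meta.InhumePrm
	prm.SetAddresses(target)
	prm.SetTombstoneAddress(tombAddr)

	_, err := db.Inhume(ctx, prm)
	return err
}
```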
diff --git a/pkg/local_object_storage/metabase/errors.go b/pkg/local_object_storage/metabase/errors.go
index bc16dc01eb..e9ffab4392 100644
--- a/pkg/local_object_storage/metabase/errors.go
+++ b/pkg/local_object_storage/metabase/errors.go
@@ -1,19 +1,10 @@
package meta
import (
- "errors"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
// ErrObjectIsExpired is returned when the requested object's
// epoch is less than the current one. Such objects are considered
// as removed and should not be returned from the Storage Engine.
var ErrObjectIsExpired = logicerr.New("object is expired")
-
-// IsErrRemoved checks if error returned by Shard Exists/Get/Put method
-// corresponds to removed object.
-func IsErrRemoved(err error) bool {
- return errors.As(err, new(apistatus.ObjectAlreadyRemoved))
-}
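
With `IsErrRemoved` removed, callers rely on the SDK helpers instead, as the updated tests do. A sketch of the resulting error triage around `Exists`; the function itself is illustrative, the names come from this diff:

```go
package example

import (
	"context"
	"errors"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// exists distinguishes "removed" and "expired" from hard errors.
func exists(ctx context.Context, db *meta.DB, addr oid.Address) (bool, error) {
	var prm meta.ExistsPrm
	prm.SetAddress(addr)

	res, err := db.Exists(ctx, prm)
	switch {
	case client.IsErrObjectAlreadyRemoved(err):
		return false, nil // covered by a tombstone
	case errors.Is(err, meta.ErrObjectIsExpired):
		return false, nil // past its expiration epoch
	case err != nil:
		return false, err
	}
	return res.Exists(), nil
}
```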
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 59bd9f4f2e..7bd6f90a67 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -1,26 +1,32 @@
package meta
import (
+ "context"
"fmt"
- "strconv"
+ "time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- addr oid.Address
+ addr oid.Address
+ ecParentAddr oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
type ExistsRes struct {
exists bool
+ locked bool
}
var ErrLackSplitInfo = logicerr.New("no split info on parent object")
@@ -30,17 +36,41 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) {
p.addr = addr
}
+// SetECParent is an Exists option to set the EC parent of the object.
+func (p *ExistsPrm) SetECParent(addr oid.Address) {
+ p.ecParentAddr = addr
+}
+
// Exists returns the fact that the object is in the metabase.
func (p ExistsRes) Exists() bool {
return p.exists
}
+// Locked returns the fact that the object is locked.
+func (p ExistsRes) Locked() bool {
+ return p.locked
+}
+
// Exists returns ErrAlreadyRemoved if addr was marked as removed. Otherwise it
// returns true if addr is in primary index or false if it is not.
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been placed in the graveyard.
// Returns the object.ErrObjectIsExpired if the object is present but already expired.
-func (db *DB) Exists(prm ExistsPrm) (res ExistsRes, err error) {
+func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Exists", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Exists",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -51,23 +81,42 @@ func (db *DB) Exists(prm ExistsPrm) (res ExistsRes, err error) {
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.exists, err = db.exists(tx, prm.addr, currEpoch)
+ res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch)
return err
})
-
- return
+ success = err == nil
+ return res, metaerr.Wrap(err)
}
-func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (exists bool, err error) {
+func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) {
+ var locked bool
+ if !ecParent.Equals(oid.Address{}) {
+ st, err := objectStatus(tx, ecParent, currEpoch)
+ if err != nil {
+ return false, false, err
+ }
+ switch st {
+ case 2:
+ return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
+ case 3:
+ return false, locked, ErrObjectIsExpired
+ }
+
+ locked = objectLocked(tx, ecParent.Container(), ecParent.Object())
+ }
// check graveyard and object expiration first
- switch objectStatus(tx, addr, currEpoch) {
+ st, err := objectStatus(tx, addr, currEpoch)
+ if err != nil {
+ return false, false, err
+ }
+ switch st {
case 1:
- return false, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return false, locked, logicerr.Wrap(new(apistatus.ObjectNotFound))
case 2:
- return false, logicerr.Wrap(apistatus.ObjectAlreadyRemoved{})
+ return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
case 3:
- return false, ErrObjectIsExpired
+ return false, locked, ErrObjectIsExpired
}
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
@@ -77,21 +126,25 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (exists b
// if graveyard is empty, then check if object exists in primary bucket
if inBucket(tx, primaryBucketName(cnr, key), objKey) {
- return true, nil
+ return true, locked, nil
}
// if primary bucket is empty, then check if object exists in parent bucket
if inBucket(tx, parentBucketName(cnr, key), objKey) {
splitInfo, err := getSplitInfo(tx, cnr, objKey)
if err != nil {
- return false, err
+ return false, locked, err
}
- return false, logicerr.Wrap(objectSDK.NewSplitInfoError(splitInfo))
+ return false, locked, logicerr.Wrap(objectSDK.NewSplitInfoError(splitInfo))
+ }
+ // if parent bucket is empty, then check if object exists in ec bucket
+ if data := getFromBucket(tx, ecInfoBucketName(cnr, key), objKey); len(data) != 0 {
+ return false, locked, getECInfoError(tx, cnr, data)
}
	// if ec bucket is empty, then check if object exists in typed buckets
- return firstIrregularObjectType(tx, cnr, objKey) != objectSDK.TypeRegular, nil
+ return firstIrregularObjectType(tx, cnr, objKey) != objectSDK.TypeRegular, locked, nil
}
// objectStatus returns:
@@ -99,41 +152,29 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (exists b
// - 1 if object with GC mark;
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
-func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) uint8 {
+func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
+ return objectStatusWithCache(nil, tx, addr, currEpoch)
+}
+
+func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
// locked object could not be removed/marked with GC/expired
- if objectLocked(tx, addr.Container(), addr.Object()) {
- return 0
+ if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) {
+ return 0, nil
}
- // we check only if the object is expired in the current
- // epoch since it is considered the only corner case: the
- // GC is expected to collect all the objects that have
- // expired previously for less than the one epoch duration
-
- var expired bool
-
- // bucket with objects that have expiration attr
- attrKey := make([]byte, bucketKeySize+len(objectV2.SysAttributeExpEpoch))
- expirationBucket := tx.Bucket(attributeBucketName(addr.Container(), objectV2.SysAttributeExpEpoch, attrKey))
- if expirationBucket != nil {
- // bucket that contains objects that expire in the current epoch
- prevEpochBkt := expirationBucket.Bucket([]byte(strconv.FormatUint(currEpoch-1, 10)))
- if prevEpochBkt != nil {
- rawOID := objectKey(addr.Object(), make([]byte, objectKeySize))
- if prevEpochBkt.Get(rawOID) != nil {
- expired = true
- }
- }
+ expired, err := isExpiredWithCache(bc, tx, addr, currEpoch)
+ if err != nil {
+ return 0, err
}
if expired {
- return 3
+ return 3, nil
}
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
+ graveyardBkt := getGraveyardBucket(bc, tx)
+ garbageBkt := getGarbageBucket(bc, tx)
addrKey := addressKey(addr, make([]byte, addressKeySize))
- return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt)
+ return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
}
func inGraveyardWithKey(addrKey []byte, graveyard, garbageBCK *bbolt.Bucket) uint8 {
@@ -191,7 +232,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
err := splitInfo.Unmarshal(rawSplitInfo)
if err != nil {
- return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
+ return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
}
return splitInfo, nil
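
`Exists` now accepts a context and, via `SetECParent`, an optional EC parent whose removal, expiration, and lock status are checked first. A usage sketch, assuming the caller knows both addresses:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// checkECChunk reports whether a chunk exists and whether its EC parent
// is locked (hypothetical helper).
func checkECChunk(ctx context.Context, db *meta.DB, chunk, ecParent oid.Address) (bool, bool, error) {
	var prm meta.ExistsPrm
	prm.SetAddress(chunk)
	prm.SetECParent(ecParent)

	res, err := db.Exists(ctx, prm)
	if err != nil {
		return false, false, err
	}
	return res.Exists(), res.Locked(), nil
}
```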
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index ddc1f47b84..3045e17f1b 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -1,12 +1,14 @@
package meta_test
import (
+ "context"
"errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -17,16 +19,17 @@ const currEpoch = 1000
func TestDB_Exists(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("no object", func(t *testing.T) {
- nonExist := generateObject(t)
+ nonExist := testutil.GenerateObject()
exists, err := metaExists(db, object.AddressOf(nonExist))
require.NoError(t, err)
require.False(t, exists)
})
t.Run("regular object", func(t *testing.T) {
- regular := generateObject(t)
+ regular := testutil.GenerateObject()
err := putBig(db, regular)
require.NoError(t, err)
@@ -35,17 +38,17 @@ func TestDB_Exists(t *testing.T) {
require.True(t, exists)
t.Run("removed object", func(t *testing.T) {
- err := metaInhume(db, object.AddressOf(regular), oidtest.Address())
+ err := metaInhume(db, object.AddressOf(regular), oidtest.ID())
require.NoError(t, err)
exists, err := metaExists(db, object.AddressOf(regular))
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
require.False(t, exists)
})
})
t.Run("tombstone object", func(t *testing.T) {
- ts := generateObject(t)
+ ts := testutil.GenerateObject()
ts.SetType(objectSDK.TypeTombstone)
err := putBig(db, ts)
@@ -56,20 +59,8 @@ func TestDB_Exists(t *testing.T) {
require.True(t, exists)
})
- t.Run("storage group object", func(t *testing.T) {
- sg := generateObject(t)
- sg.SetType(objectSDK.TypeStorageGroup)
-
- err := putBig(db, sg)
- require.NoError(t, err)
-
- exists, err := metaExists(db, object.AddressOf(sg))
- require.NoError(t, err)
- require.True(t, exists)
- })
-
t.Run("lock object", func(t *testing.T) {
- lock := generateObject(t)
+ lock := testutil.GenerateObject()
lock.SetType(objectSDK.TypeLock)
err := putBig(db, lock)
@@ -82,9 +73,9 @@ func TestDB_Exists(t *testing.T) {
t.Run("virtual object", func(t *testing.T) {
cnr := cidtest.ID()
- parent := generateObjectWithCID(t, cnr)
+ parent := testutil.GenerateObjectWithCID(cnr)
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
@@ -102,16 +93,16 @@ func TestDB_Exists(t *testing.T) {
cnr := cidtest.ID()
splitID := objectSDK.NewSplitID()
- parent := generateObjectWithCID(t, cnr)
- addAttribute(parent, "foo", "bar")
+ parent := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(parent, "foo", "bar")
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
child.SetSplitID(splitID)
- link := generateObjectWithCID(t, cnr)
+ link := testutil.GenerateObjectWithCID(cnr)
link.SetParent(parent)
link.SetParentID(idParent)
idChild, _ := child.ID()
@@ -180,6 +171,7 @@ func TestDB_Exists(t *testing.T) {
require.ErrorIs(t, err, meta.ErrObjectIsExpired)
gotObj, err = metaExists(db, object.AddressOf(nonExp))
+ require.NoError(t, err)
require.True(t, gotObj)
})
})
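
The tests configure the metabase with `meta.WithEpochState(epochState{currEpoch})`; judging by the `db.epochState.CurrentEpoch()` calls above, the option only needs a `CurrentEpoch() uint64` accessor. A minimal stub of that assumed contract:

```go
package example

import (
	"path/filepath"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// epochState is a fixed-epoch stub like the one the tests use; the
// CurrentEpoch method is the assumed contract behind WithEpochState.
type epochState struct{ epoch uint64 }

func (s epochState) CurrentEpoch() uint64 { return s.epoch }

func newTestMetabase(dir string) *meta.DB {
	return meta.New(
		meta.WithEpochState(epochState{epoch: 1000}),
		meta.WithPath(filepath.Join(dir, "metabase")),
	)
}
```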
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
new file mode 100644
index 0000000000..a1351cb6f5
--- /dev/null
+++ b/pkg/local_object_storage/metabase/expired.go
@@ -0,0 +1,113 @@
+package meta
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+var errInvalidEpochValueLength = errors.New("could not parse expiration epoch: invalid data length")
+
+// FilterExpired returns the expired items from the given addresses.
+// An address is considered expired if the metabase contains its expiration
+// information and the expiration epoch is less than the given epoch.
+func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) {
+ var (
+ startedAt = time.Now()
+ success = true
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("FilterExpired", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.FilterExpired",
+ trace.WithAttributes(
+ attribute.String("epoch", strconv.FormatUint(epoch, 10)),
+ attribute.Int("addr_count", len(addresses)),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ result := make([]oid.Address, 0, len(addresses))
+ containerIDToObjectIDs := make(map[cid.ID][]oid.ID)
+ for _, addr := range addresses {
+ containerIDToObjectIDs[addr.Container()] = append(containerIDToObjectIDs[addr.Container()], addr.Object())
+ }
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ for containerID, objectIDs := range containerIDToObjectIDs {
+ select {
+ case <-ctx.Done():
+ return ErrInterruptIterator
+ default:
+ }
+
+ expired, err := selectExpiredObjects(tx, epoch, containerID, objectIDs)
+ if err != nil {
+ return err
+ }
+ result = append(result, expired...)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, metaerr.Wrap(err)
+ }
+ success = true
+ return result, nil
+}
+
+func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
+ return isExpiredWithCache(nil, tx, addr, currEpoch)
+}
+
+func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
+ b := getExpiredBucket(bc, tx, addr.Container())
+ if b == nil {
+ return false, nil
+ }
+ key := make([]byte, objectKeySize)
+ addr.Object().Encode(key)
+ val := b.Get(key)
+ if len(val) == 0 {
+ return false, nil
+ }
+ if len(val) != epochSize {
+ return false, errInvalidEpochValueLength
+ }
+ expEpoch := binary.LittleEndian.Uint64(val)
+ return expEpoch < currEpoch, nil
+}
+
+func selectExpiredObjects(tx *bbolt.Tx, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.Address, error) {
+ result := make([]oid.Address, 0)
+ var addr oid.Address
+ addr.SetContainer(containerID)
+ for _, objID := range objectIDs {
+ addr.SetObject(objID)
+ expired, err := isExpired(tx, addr, epoch)
+ if err != nil {
+ return nil, err
+ }
+ if expired {
+ result = append(result, addr)
+ }
+ }
+ return result, nil
+}
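
A usage sketch for `FilterExpired` as one step of a hypothetical GC pass: filter the candidates at the current epoch, then delete the expired subset. The flow is illustrative, not taken from this diff:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// dropExpired keeps only the addresses the metabase knows to be expired
// at the given epoch, then deletes them.
func dropExpired(ctx context.Context, db *meta.DB, epoch uint64, candidates []oid.Address) error {
	expired, err := db.FilterExpired(ctx, epoch, candidates)
	if err != nil {
		return err
	}
	var prm meta.DeletePrm
	prm.SetAddresses(expired...)
	_, err = db.Delete(ctx, prm)
	return err
}
```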
diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go
new file mode 100644
index 0000000000..495c1eee76
--- /dev/null
+++ b/pkg/local_object_storage/metabase/expired_test.go
@@ -0,0 +1,94 @@
+package meta_test
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDB_SelectExpired(t *testing.T) {
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ containerID1 := cidtest.ID()
+
+ expiredObj11 := testutil.GenerateObject()
+ expiredObj11.SetContainerID(containerID1)
+ setExpiration(expiredObj11, 10)
+ err := putBig(db, expiredObj11)
+ require.NoError(t, err)
+
+ expiredObj12 := testutil.GenerateObject()
+ expiredObj12.SetContainerID(containerID1)
+ setExpiration(expiredObj12, 12)
+ err = putBig(db, expiredObj12)
+ require.NoError(t, err)
+
+ notExpiredObj11 := testutil.GenerateObject()
+ notExpiredObj11.SetContainerID(containerID1)
+ setExpiration(notExpiredObj11, 20)
+ err = putBig(db, notExpiredObj11)
+ require.NoError(t, err)
+
+ regularObj11 := testutil.GenerateObject()
+ regularObj11.SetContainerID(containerID1)
+ err = putBig(db, regularObj11)
+ require.NoError(t, err)
+
+ containerID2 := cidtest.ID()
+
+ expiredObj21 := testutil.GenerateObject()
+ expiredObj21.SetContainerID(containerID2)
+ setExpiration(expiredObj21, 10)
+ err = putBig(db, expiredObj21)
+ require.NoError(t, err)
+
+ expiredObj22 := testutil.GenerateObject()
+ expiredObj22.SetContainerID(containerID2)
+ setExpiration(expiredObj22, 12)
+ err = putBig(db, expiredObj22)
+ require.NoError(t, err)
+
+ notExpiredObj21 := testutil.GenerateObject()
+ notExpiredObj21.SetContainerID(containerID2)
+ setExpiration(notExpiredObj21, 20)
+ err = putBig(db, notExpiredObj21)
+ require.NoError(t, err)
+
+ regularObj21 := testutil.GenerateObject()
+ regularObj21.SetContainerID(containerID2)
+ err = putBig(db, regularObj21)
+ require.NoError(t, err)
+
+ expired, err := db.FilterExpired(context.Background(), 15,
+ []oid.Address{
+ getAddressSafe(t, expiredObj11), getAddressSafe(t, expiredObj12), getAddressSafe(t, notExpiredObj11), getAddressSafe(t, regularObj11),
+ getAddressSafe(t, expiredObj21), getAddressSafe(t, expiredObj22), getAddressSafe(t, notExpiredObj21), getAddressSafe(t, regularObj21),
+ })
+ require.NoError(t, err)
+ require.Equal(t, 4, len(expired), "invalid expired count")
+ require.Contains(t, expired, getAddressSafe(t, expiredObj11))
+ require.Contains(t, expired, getAddressSafe(t, expiredObj12))
+ require.Contains(t, expired, getAddressSafe(t, expiredObj21))
+ require.Contains(t, expired, getAddressSafe(t, expiredObj22))
+}
+
+func getAddressSafe(t *testing.T, o *objectSDK.Object) oid.Address {
+ cid, set := o.ContainerID()
+ if !set {
+ t.Fatalf("container id required")
+ }
+ id, set := o.ID()
+ if !set {
+ t.Fatalf("object id required")
+ }
+ var addr oid.Address
+ addr.SetContainer(cid)
+ addr.SetObject(id)
+ return addr
+}
diff --git a/pkg/local_object_storage/metabase/generic_test.go b/pkg/local_object_storage/metabase/generic_test.go
index 227aa9f8dc..52581b2a02 100644
--- a/pkg/local_object_storage/metabase/generic_test.go
+++ b/pkg/local_object_storage/metabase/generic_test.go
@@ -1,24 +1,19 @@
package meta
import (
- "os"
"path/filepath"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
)
func TestGeneric(t *testing.T) {
- defer func() { _ = os.RemoveAll(t.Name()) }()
+ t.Parallel()
- var n int
newMetabase := func(t *testing.T) storagetest.Component {
- n++
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
return New(
WithEpochState(epochStateImpl{}),
- WithPath(dir))
+ WithPath(filepath.Join(t.TempDir(), "metabase")))
}
storagetest.TestAll(t, newMetabase)
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index c0feda06ca..821810c090 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -1,14 +1,20 @@
package meta
import (
+ "context"
"fmt"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// GetPrm groups the parameters of Get operation.
@@ -46,7 +52,22 @@ func (r GetRes) Header() *objectSDK.Object {
// Returns an error of type apistatus.ObjectNotFound if object is missing in DB.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been placed in the graveyard.
// Returns the object.ErrObjectIsExpired if the object is present but already expired.
-func (db *DB) Get(prm GetPrm) (res GetRes, err error) {
+func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Get", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("raw", prm.raw),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -62,17 +83,25 @@ func (db *DB) Get(prm GetPrm) (res GetRes, err error) {
return err
})
-
- return
+ success = err == nil
+ return res, metaerr.Wrap(err)
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
+ return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
+}
+
+func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
if checkStatus {
- switch objectStatus(tx, addr, currEpoch) {
+ st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
case 1:
- return nil, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
case 2:
- return nil, logicerr.Wrap(apistatus.ObjectAlreadyRemoved{})
+ return nil, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
case 3:
return nil, ErrObjectIsExpired
}
@@ -84,9 +113,15 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
bucketName := make([]byte, bucketKeySize)
// check in primary index
- data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
+ if b := getPrimaryBucket(bc, tx, cnr); b != nil {
+ if data := b.Get(key); len(data) != 0 {
+ return obj, obj.Unmarshal(data)
+ }
+ }
+
+ data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return nil, getECInfoError(tx, cnr, data)
}
// if not found then check in tombstone index
@@ -95,12 +130,6 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
return obj, obj.Unmarshal(data)
}
- // if not found then check in storage group index
- data = getFromBucket(tx, storageGroupBucketName(cnr, bucketName), key)
- if len(data) != 0 {
- return obj, obj.Unmarshal(data)
- }
-
// if not found then check in locker index
data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key)
if len(data) != 0 {
@@ -128,7 +157,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
bucketName := make([]byte, bucketKeySize)
parentBucket := tx.Bucket(parentBucketName(cnr, bucketName))
if parentBucket == nil {
- return nil, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
relativeLst, err := decodeList(parentBucket.Get(key))
@@ -137,26 +166,38 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
}
if len(relativeLst) == 0 { // this should never happen though
- return nil, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
- // pick last item, for now there is not difference which address to pick
- // but later list might be sorted so first or last value can be more
- // prioritized to choose
- virtualOID := relativeLst[len(relativeLst)-1]
- data := getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID)
+ var data []byte
+ for i := 0; i < len(relativeLst) && len(data) == 0; i++ {
+ virtualOID := relativeLst[len(relativeLst)-i-1]
+ data = getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID)
+ }
+
+ if len(data) == 0 {
+ // check if any of the relatives is an EC object
+ for _, relative := range relativeLst {
+ data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), relative)
+ if len(data) > 0 {
+			// we can't return object headers, but we can return an error,
+			// so the assembler can try to assemble the complex object
+ return nil, getSplitInfoError(tx, cnr, key)
+ }
+ }
+ }
child := objectSDK.New()
err = child.Unmarshal(data)
if err != nil {
- return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
+ return nil, fmt.Errorf("unmarshal child with parent: %w", err)
}
par := child.Parent()
if par == nil { // this should never happen though
- return nil, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
return par, nil
@@ -168,5 +209,30 @@ func getSplitInfoError(tx *bbolt.Tx, cnr cid.ID, key []byte) error {
return logicerr.Wrap(objectSDK.NewSplitInfoError(splitInfo))
}
- return logicerr.Wrap(apistatus.ObjectNotFound{})
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+}
+
+func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error {
+ keys, err := decodeList(data)
+ if err != nil {
+ return err
+ }
+ ecInfo := objectSDK.NewECInfo()
+ for _, key := range keys {
+ // check in primary index
+ objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
+ if len(objData) != 0 {
+ obj := objectSDK.New()
+ if err := obj.Unmarshal(objData); err != nil {
+ return err
+ }
+ chunk := objectSDK.ECChunk{}
+ id, _ := obj.ID()
+ chunk.SetID(id)
+ chunk.Index = obj.ECHeader().Index()
+ chunk.Total = obj.ECHeader().Total()
+ ecInfo.AddChunk(chunk)
+ }
+ }
+ return logicerr.Wrap(objectSDK.NewECInfoError(ecInfo))
}
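
`Get` now surfaces erasure-coded objects as an `ECInfoError` instead of a header. A sketch of how a caller can branch on that; the helper is hypothetical:

```go
package example

import (
	"context"
	"errors"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// header returns the object header, or propagates the ECInfoError that
// describes the locally stored EC chunks so the caller can assemble.
func header(ctx context.Context, db *meta.DB, addr oid.Address) (*objectSDK.Object, error) {
	var prm meta.GetPrm
	prm.SetAddress(addr)

	res, err := db.Get(ctx, prm)
	var ecErr *objectSDK.ECInfoError
	if errors.As(err, &ecErr) {
		// only EC chunks are stored here; ecErr.ECInfo() lists them
		return nil, ecErr
	}
	if err != nil {
		return nil, err
	}
	return res.Header(), nil
}
```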
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 0cfef27fe1..98c4284105 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -2,6 +2,8 @@ package meta_test
import (
"bytes"
+ "context"
+ "errors"
"fmt"
"os"
"runtime"
@@ -9,23 +11,27 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestDB_Get(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
- raw := generateObject(t)
+ raw := testutil.GenerateObject()
// equal fails on diff of attributes and <{}> attributes,
/* so we make non empty attribute slice in parent*/
- addAttribute(raw, "foo", "bar")
+ testutil.AddAttribute(raw, "foo", "bar")
t.Run("object not found", func(t *testing.T) {
_, err := metaGet(db, object.AddressOf(raw), false)
@@ -53,18 +59,6 @@ func TestDB_Get(t *testing.T) {
require.Equal(t, raw.CutPayload(), newObj)
})
- t.Run("put storage group object", func(t *testing.T) {
- raw.SetType(objectSDK.TypeStorageGroup)
- raw.SetID(oidtest.ID())
-
- err := putBig(db, raw)
- require.NoError(t, err)
-
- newObj, err := metaGet(db, object.AddressOf(raw), false)
- require.NoError(t, err)
- require.Equal(t, raw.CutPayload(), newObj)
- })
-
t.Run("put lock object", func(t *testing.T) {
raw.SetType(objectSDK.TypeLock)
raw.SetID(oidtest.ID())
@@ -81,10 +75,10 @@ func TestDB_Get(t *testing.T) {
cnr := cidtest.ID()
splitID := objectSDK.NewSplitID()
- parent := generateObjectWithCID(t, cnr)
- addAttribute(parent, "foo", "bar")
+ parent := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(parent, "foo", "bar")
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
@@ -118,23 +112,58 @@ func TestDB_Get(t *testing.T) {
require.True(t, binaryEqual(child.CutPayload(), newChild))
})
+ t.Run("put erasure-coded object", func(t *testing.T) {
+ cnr := cidtest.ID()
+ virtual := testutil.GenerateObjectWithCID(cnr)
+ c, err := erasurecode.NewConstructor(3, 1)
+ require.NoError(t, err)
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ parts, err := c.Split(virtual, &pk.PrivateKey)
+ require.NoError(t, err)
+ for _, part := range parts {
+ err = putBig(db, part)
+ var eiError *objectSDK.ECInfoError
+ if err != nil && !errors.As(err, &eiError) {
+ require.NoError(t, err)
+ }
+ }
+ _, err = metaGet(db, object.AddressOf(virtual), true)
+ var eiError *objectSDK.ECInfoError
+ require.ErrorAs(t, err, &eiError)
+ require.Equal(t, len(eiError.ECInfo().Chunks), len(parts))
+ for _, chunk := range eiError.ECInfo().Chunks {
+ var found bool
+ for _, part := range parts {
+ partID, _ := part.ID()
+ var chunkID oid.ID
+ require.NoError(t, chunkID.ReadFromV2(chunk.ID))
+ if chunkID.Equals(partID) {
+ found = true
+ }
+ }
+ if !found {
+ require.Fail(t, "chunk not found")
+ }
+ }
+ })
+
t.Run("get removed object", func(t *testing.T) {
obj := oidtest.Address()
- ts := oidtest.Address()
- require.NoError(t, metaInhume(db, obj, ts))
+ require.NoError(t, metaInhume(db, obj, oidtest.ID()))
_, err := metaGet(db, obj, false)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
obj = oidtest.Address()
var prm meta.InhumePrm
prm.SetAddresses(obj)
- _, err = db.Inhume(prm)
+ _, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
_, err = metaGet(db, obj, false)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, client.IsErrObjectNotFound(err))
})
t.Run("expired object", func(t *testing.T) {
@@ -184,8 +213,6 @@ func BenchmarkGet(b *testing.B) {
}
}
-var obj *objectSDK.Object
-
func benchmarkGet(b *testing.B, numOfObj int) {
prepareDb := func(batchSize int) (*meta.DB, []oid.Address) {
db := newDB(b,
@@ -194,8 +221,8 @@ func benchmarkGet(b *testing.B, numOfObj int) {
)
addrs := make([]oid.Address, 0, numOfObj)
- for i := 0; i < numOfObj; i++ {
- raw := generateObject(b)
+ for range numOfObj {
+ raw := testutil.GenerateObject()
addrs = append(addrs, object.AddressOf(raw))
err := putBig(db, raw)
@@ -206,6 +233,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
}
db, addrs := prepareDb(runtime.NumCPU())
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("parallel", func(b *testing.B) {
b.ReportAllocs()
@@ -217,7 +245,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
getPrm.SetAddress(addrs[counter%len(addrs)])
counter++
- _, err := db.Get(getPrm)
+ _, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@@ -225,18 +253,18 @@ func benchmarkGet(b *testing.B, numOfObj int) {
})
})
- require.NoError(b, db.Close())
+ require.NoError(b, db.Close(context.Background()))
require.NoError(b, os.RemoveAll(b.Name()))
db, addrs = prepareDb(1)
b.Run("serial", func(b *testing.B) {
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for i := range b.N {
var getPrm meta.GetPrm
getPrm.SetAddress(addrs[i%len(addrs)])
- _, err := db.Get(getPrm)
+ _, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@@ -249,6 +277,6 @@ func metaGet(db *meta.DB, addr oid.Address, raw bool) (*objectSDK.Object, error)
prm.SetAddress(addr)
prm.SetRaw(raw)
- res, err := db.Get(prm)
+ res, err := db.Get(context.Background(), prm)
return res.Header(), err
}
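
The EC test above splits an object into chunks before storing them. The same split, extracted as a sketch; the `[]*objectSDK.Object` return type is inferred from the test's usage:

```go
package example

import (
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

// splitEC produces EC chunks for an object with 3 data and 1 parity
// shards, following the round trip exercised in the test above.
func splitEC(obj *objectSDK.Object) ([]*objectSDK.Object, error) {
	c, err := erasurecode.NewConstructor(3, 1)
	if err != nil {
		return nil, err
	}
	pk, err := keys.NewPrivateKey()
	if err != nil {
		return nil, err
	}
	return c.Split(obj, &pk.PrivateKey)
}
```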
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 393c9f4d03..2f23d424c8 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -2,9 +2,14 @@ package meta
import (
"bytes"
+ "context"
"errors"
"fmt"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -57,7 +62,18 @@ func (g *GarbageIterationPrm) SetOffset(offset oid.Address) {
//
// If h returns ErrInterruptIterator, nil returns immediately.
// Returns other errors of h directly.
-func (db *DB) IterateOverGarbage(p GarbageIterationPrm) error {
+func (db *DB) IterateOverGarbage(ctx context.Context, p GarbageIterationPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverGarbage", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverGarbage")
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -65,9 +81,11 @@ func (db *DB) IterateOverGarbage(p GarbageIterationPrm) error {
return ErrDegradedMode
}
- return db.boltDB.View(func(tx *bbolt.Tx) error {
+ err := metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
return db.iterateDeletedObj(tx, gcHandler{p.h}, p.offset)
- })
+ }))
+ success = err == nil
+ return err
}
// TombstonedObject represents descriptor of the
@@ -111,7 +129,7 @@ func (g *GraveyardIterationPrm) SetHandler(h TombstonedHandler) {
// Note: if offset is not found in db, iteration starts
// from the element that WOULD BE the following after the
// offset if offset was presented. That means that it is
-// safe to delete offset element and pass if to the
+// safe to delete offset element and pass it to the
// iteration once again: iteration would start from the
// next element.
//
@@ -124,7 +142,18 @@ func (g *GraveyardIterationPrm) SetOffset(offset oid.Address) {
//
// If h returns ErrInterruptIterator, nil returns immediately.
// Returns other errors of h directly.
-func (db *DB) IterateOverGraveyard(p GraveyardIterationPrm) error {
+func (db *DB) IterateOverGraveyard(ctx context.Context, p GraveyardIterationPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverGraveyard", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverGraveyard")
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -132,9 +161,9 @@ func (db *DB) IterateOverGraveyard(p GraveyardIterationPrm) error {
return ErrDegradedMode
}
- return db.boltDB.View(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
return db.iterateDeletedObj(tx, graveyardHandler{p.h}, p.offset)
- })
+ }))
}
type kvHandler interface {
@@ -148,7 +177,7 @@ type gcHandler struct {
func (g gcHandler) handleKV(k, _ []byte) error {
o, err := garbageFromKV(k)
if err != nil {
- return fmt.Errorf("could not parse garbage object: %w", err)
+ return fmt.Errorf("parse garbage object: %w", err)
}
return g.h(o)
@@ -161,7 +190,7 @@ type graveyardHandler struct {
func (g graveyardHandler) handleKV(k, v []byte) error {
o, err := graveFromKV(k, v)
if err != nil {
- return fmt.Errorf("could not parse grave: %w", err)
+ return fmt.Errorf("parse grave: %w", err)
}
return g.h(o)
@@ -211,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
func garbageFromKV(k []byte) (res GarbageObject, err error) {
err = decodeAddressFromKey(&res.addr, k)
if err != nil {
- err = fmt.Errorf("could not parse address: %w", err)
+ err = fmt.Errorf("parse address: %w", err)
}
return
@@ -227,35 +256,58 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) {
return
}
-// DropGraves deletes tombstoned objects from the
+// InhumeTombstones marks the tombstones themselves for GC and deletes the tombstoned records from the
// graveyard bucket.
//
// Returns any error that appeared during the deletion process.
-func (db *DB) DropGraves(tss []TombstonedObject) error {
+func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones")
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
- return ErrDegradedMode
+ return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
- return ErrReadOnlyMode
+ return InhumeRes{}, ErrReadOnlyMode
}
buf := make([]byte, addressKeySize)
+ prm := InhumePrm{forceRemoval: true}
+ currEpoch := db.epochState.CurrentEpoch()
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(graveyardBucketName)
- if bkt == nil {
- return nil
+ var res InhumeRes
+
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)}
+
+ garbageBKT := tx.Bucket(garbageBucketName)
+ graveyardBKT := tx.Bucket(graveyardBucketName)
+
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
+ if err != nil {
+ return err
}
- for _, ts := range tss {
- err := bkt.Delete(addressKey(ts.Address(), buf))
- if err != nil {
+ for i := range tss {
+ if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil {
+ return err
+ }
+ if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil {
return err
}
}
return nil
})
+ return res, err
}
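
`InhumeTombstones` pairs naturally with graveyard iteration: collect tombstoned records, then drop their graves while GC-marking the tombstones in one call. An illustrative flow using only APIs from this diff:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// expireTombstones sketches the GC flow implied by the rename: collect
// graveyard records, then inhume their tombstones and drop the graves.
func expireTombstones(ctx context.Context, db *meta.DB) error {
	var tss []meta.TombstonedObject
	var prm meta.GraveyardIterationPrm
	prm.SetHandler(func(ts meta.TombstonedObject) error {
		tss = append(tss, ts)
		return nil
	})
	if err := db.IterateOverGraveyard(ctx, prm); err != nil {
		return err
	}
	_, err := db.InhumeTombstones(ctx, tss)
	return err
}
```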
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index e18ef9114d..ebadecc044 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -1,10 +1,15 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -12,6 +17,7 @@ import (
func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
var counter int
var iterGravePRM meta.GraveyardIterationPrm
@@ -21,7 +27,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
return nil
})
- err := db.IterateOverGraveyard(iterGravePRM)
+ err := db.IterateOverGraveyard(context.Background(), iterGravePRM)
require.NoError(t, err)
require.Zero(t, counter)
@@ -31,16 +37,17 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
return nil
})
- err = db.IterateOverGarbage(iterGCPRM)
+ err = db.IterateOverGarbage(context.Background(), iterGCPRM)
require.NoError(t, err)
require.Zero(t, counter)
}
func TestDB_Iterate_OffsetNotFound(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
- obj1 := generateObject(t)
- obj2 := generateObject(t)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
var addr1 oid.Address
err := addr1.DecodeString("AUSF6rhReoAdPVKYUZWW9o2LbtTvekn54B3JXi7pdzmn/2daLhLB7yVXbjBaKkckkuvjX22BxRYuSHy9RPxuH9PZS")
@@ -67,7 +74,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1))
inhumePrm.SetGCMark()
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var counter int
@@ -81,7 +88,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
return nil
})
- err = db.IterateOverGarbage(iterGCPRM)
+ err = db.IterateOverGarbage(context.Background(), iterGCPRM)
require.NoError(t, err)
// the second object would be put after the
@@ -97,7 +104,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
return nil
})
- err = db.IterateOverGarbage(iterGCPRM)
+ err = db.IterateOverGarbage(context.Background(), iterGCPRM)
require.NoError(t, err)
// the third object would be put before the
@@ -108,12 +115,14 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := generateObject(t)
- obj2 := generateObject(t)
- obj3 := generateObject(t)
- obj4 := generateObject(t)
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -133,18 +142,19 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
inhumePrm.SetAddresses(object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
// inhume with GC mark
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var (
@@ -162,7 +172,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
return nil
})
- err = db.IterateOverGraveyard(iterGravePRM)
+ err = db.IterateOverGraveyard(context.Background(), iterGravePRM)
require.NoError(t, err)
var iterGCPRM meta.GarbageIterationPrm
@@ -173,7 +183,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
return nil
})
- err = db.IterateOverGarbage(iterGCPRM)
+ err = db.IterateOverGarbage(context.Background(), iterGCPRM)
require.NoError(t, err)
// objects covered with a tombstone
@@ -194,12 +204,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := generateObject(t)
- obj2 := generateObject(t)
- obj3 := generateObject(t)
- obj4 := generateObject(t)
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -217,6 +229,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(
@@ -224,7 +237,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetTombstoneAddress(addrTombstone)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGraveyard := []oid.Address{
@@ -253,7 +266,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
return nil
})
- err = db.IterateOverGraveyard(iterGraveyardPrm)
+ err = db.IterateOverGraveyard(context.Background(), iterGraveyardPrm)
require.NoError(t, err)
require.Equal(t, firstIterationSize, counter)
require.Equal(t, firstIterationSize, len(gotGraveyard))
@@ -270,7 +283,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
return nil
})
- err = db.IterateOverGraveyard(iterGraveyardPrm)
+ err = db.IterateOverGraveyard(context.Background(), iterGraveyardPrm)
require.NoError(t, err)
require.Equal(t, len(expectedGraveyard), counter)
require.ElementsMatch(t, gotGraveyard, expectedGraveyard)
@@ -285,19 +298,20 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
return nil
})
- err = db.IterateOverGraveyard(iterGraveyardPrm)
+ err = db.IterateOverGraveyard(context.Background(), iterGraveyardPrm)
require.NoError(t, err)
require.False(t, iWasCalled)
}
func TestDB_IterateOverGarbage_Offset(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// generate and put 4 objects
- obj1 := generateObject(t)
- obj2 := generateObject(t)
- obj3 := generateObject(t)
- obj4 := generateObject(t)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
+ obj3 := testutil.GenerateObject()
+ obj4 := testutil.GenerateObject()
var err error
@@ -319,7 +333,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGarbage := []oid.Address{
@@ -346,7 +360,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
return nil
})
- err = db.IterateOverGarbage(iterGarbagePrm)
+ err = db.IterateOverGarbage(context.Background(), iterGarbagePrm)
require.NoError(t, err)
require.Equal(t, firstIterationSize, counter)
require.Equal(t, firstIterationSize, len(gotGarbage))
@@ -361,7 +375,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
return nil
})
- err = db.IterateOverGarbage(iterGarbagePrm)
+ err = db.IterateOverGarbage(context.Background(), iterGarbagePrm)
require.NoError(t, err)
require.Equal(t, len(expectedGarbage), counter)
require.ElementsMatch(t, gotGarbage, expectedGarbage)
@@ -376,17 +390,19 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
return nil
})
- err = db.IterateOverGarbage(iterGarbagePrm)
+ err = db.IterateOverGarbage(context.Background(), iterGarbagePrm)
require.NoError(t, err)
require.False(t, iWasCalled)
}
-func TestDB_DropGraves(t *testing.T) {
+func TestDB_InhumeTombstones(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+ cnr := cidtest.ID()
// generate and put 2 objects
- obj1 := generateObject(t)
- obj2 := generateObject(t)
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -396,14 +412,26 @@ func TestDB_DropGraves(t *testing.T) {
err = putBig(db, obj2)
require.NoError(t, err)
- // inhume with tombstone
- addrTombstone := oidtest.Address()
+ id1, _ := obj1.ID()
+ id2, _ := obj2.ID()
+ ts := objectSDK.NewTombstone()
+ ts.SetMembers([]oid.ID{id1, id2})
+ objTs := objectSDK.New()
+ objTs.SetContainerID(cnr)
+ objTs.SetType(objectSDK.TypeTombstone)
+
+ data, _ := ts.Marshal()
+ objTs.SetPayload(data)
+ require.NoError(t, objectSDK.CalculateAndSetID(objTs))
+ require.NoError(t, putBig(db, objTs))
+
+ addrTombstone := object.AddressOf(objTs)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
buriedTS := make([]meta.TombstonedObject, 0)
@@ -416,12 +444,15 @@ func TestDB_DropGraves(t *testing.T) {
return nil
})
- err = db.IterateOverGraveyard(iterGravePRM)
+ err = db.IterateOverGraveyard(context.Background(), iterGravePRM)
require.NoError(t, err)
require.Equal(t, 2, counter)
- err = db.DropGraves(buriedTS)
+ res, err := db.InhumeTombstones(context.Background(), buriedTS)
require.NoError(t, err)
+ require.EqualValues(t, 1, res.LogicInhumed())
+ require.EqualValues(t, 0, res.UserInhumed())
+ require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID())
counter = 0
iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error {
@@ -429,7 +460,7 @@ func TestDB_DropGraves(t *testing.T) {
return nil
})
- err = db.IterateOverGraveyard(iterGravePRM)
+ err = db.IterateOverGraveyard(context.Background(), iterGravePRM)
require.NoError(t, err)
require.Zero(t, counter)
}
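
Consuming the new `InhumeRes` counters, mirroring the assertions above; the reporting helper is hypothetical:

```go
package example

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
)

// reportInhumed runs an Inhume and prints the per-container counters
// (EncodeToString on cid.ID is assumed from SDK usage elsewhere).
func reportInhumed(ctx context.Context, db *meta.DB, prm meta.InhumePrm) error {
	res, err := db.Inhume(ctx, prm)
	if err != nil {
		return err
	}
	fmt.Printf("logic=%d user=%d\n", res.LogicInhumed(), res.UserInhumed())
	for cnr, c := range res.InhumedByCnrID() {
		fmt.Printf("%s: logic=%d user=%d\n", cnr.EncodeToString(), c.Logic, c.User)
	}
	return nil
}
```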
diff --git a/pkg/local_object_storage/metabase/index_test.go b/pkg/local_object_storage/metabase/index_test.go
index eb7238a59f..45b9bc756c 100644
--- a/pkg/local_object_storage/metabase/index_test.go
+++ b/pkg/local_object_storage/metabase/index_test.go
@@ -1,9 +1,11 @@
package meta
import (
+ "crypto/rand"
"math"
- "math/rand"
+ mrand "math/rand"
"testing"
+ "time"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/stretchr/testify/require"
@@ -39,9 +41,10 @@ func Test_decodeList(t *testing.T) {
require.Error(t, err)
})
t.Run("random", func(t *testing.T) {
+ r := mrand.New(mrand.NewSource(time.Now().Unix()))
expected := make([][]byte, 20)
for i := range expected {
- expected[i] = make([]byte, rand.Uint32()%10)
+ expected[i] = make([]byte, r.Uint32()%10)
rand.Read(expected[i])
}
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index ce772576f6..76018fb611 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -2,13 +2,18 @@ package meta
import (
"bytes"
+ "context"
"errors"
"fmt"
+ "time"
+ storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -26,21 +31,34 @@ type InhumePrm struct {
// DeletionInfo contains details on deleted object.
type DeletionInfo struct {
- Size uint64
- CID cid.ID
+ Size uint64
+ CID cid.ID
+ IsUser bool
}
// InhumeRes encapsulates results of Inhume operation.
type InhumeRes struct {
- deletedLockObj []oid.Address
- availableImhumed uint64
- deletionDetails []DeletionInfo
+ deletedLockObj []oid.Address
+ logicInhumed uint64
+ userInhumed uint64
+ inhumedByCnrID map[cid.ID]ObjectCounters
+ deletionDetails []DeletionInfo
}
-// AvailableInhumed return number of available object
+// LogicInhumed returns the number of logical objects
// that have been inhumed.
-func (i InhumeRes) AvailableInhumed() uint64 {
- return i.availableImhumed
+func (i InhumeRes) LogicInhumed() uint64 {
+ return i.logicInhumed
+}
+
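+// UserInhumed returns the number of user objects that have been inhumed.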
+func (i InhumeRes) UserInhumed() uint64 {
+ return i.userInhumed
+}
+
+// InhumedByCnrID returns the number of objects
+// that have been inhumed, grouped by container ID.
+func (i InhumeRes) InhumedByCnrID() map[cid.ID]ObjectCounters {
+ return i.inhumedByCnrID
}
// DeletedLockObjects returns deleted object of LOCK
@@ -64,11 +82,32 @@ func (i InhumeRes) GetDeletionInfoByIndex(target int) DeletionInfo {
// storeDeletionInfo stores the size of a deleted object and the associated
// container ID in the corresponding arrays.
-func (i *InhumeRes) storeDeletionInfo(containerID cid.ID, deletedSize uint64) {
+func (i *InhumeRes) storeDeletionInfo(containerID cid.ID, deletedSize uint64, isUser bool) {
i.deletionDetails = append(i.deletionDetails, DeletionInfo{
- Size: deletedSize,
- CID: containerID,
+ Size: deletedSize,
+ CID: containerID,
+ IsUser: isUser,
})
+ i.logicInhumed++
+ if isUser {
+ i.userInhumed++
+ }
+
+ if v, ok := i.inhumedByCnrID[containerID]; ok {
+ v.Logic++
+ if isUser {
+ v.User++
+ }
+ i.inhumedByCnrID[containerID] = v
+ } else {
+ v = ObjectCounters{
+ Logic: 1,
+ }
+ if isUser {
+ v.User = 1
+ }
+ i.inhumedByCnrID[containerID] = v
+ }
}
// SetAddresses sets a list of object addresses that should be inhumed.
@@ -104,6 +143,20 @@ func (p *InhumePrm) SetForceGCMark() {
p.forceRemoval = true
}
+func (p *InhumePrm) validate() error {
+ if p == nil {
+ return nil
+ }
+ if p.tomb != nil {
+ for _, addr := range p.target {
+ if addr.Container() != p.tomb.Container() {
+ return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb)
+ }
+ }
+ }
+ return nil
+}
+
var errBreakBucketForEach = errors.New("bucket ForEach break")
// ErrLockObjectRemoval is returned when inhume operation is being
@@ -118,156 +171,265 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal")
//
// NOTE: Marks any object with GC mark (despite any prohibitions on operations
// with that object) if WithForceGCMark option has been provided.
-func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) {
+func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Inhume", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Inhume")
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
+ if err := prm.validate(); err != nil {
+ return InhumeRes{}, err
+ }
+
if db.mode.NoMetabase() {
return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
return InhumeRes{}, ErrReadOnlyMode
}
+ res := InhumeRes{
+ inhumedByCnrID: make(map[cid.ID]ObjectCounters),
+ }
currEpoch := db.epochState.CurrentEpoch()
- var inhumed uint64
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ return db.inhumeTx(tx, currEpoch, prm, &res)
+ })
+ success = err == nil
+ if success {
+ for _, addr := range prm.target {
+ storagelog.Write(ctx, db.log,
+ storagelog.AddressField(addr),
+ storagelog.OpField("metabase INHUME"))
+ }
+ }
+ return res, metaerr.Wrap(err)
+}
- err = db.boltDB.Update(func(tx *bbolt.Tx) error {
- garbageBKT := tx.Bucket(garbageBucketName)
- graveyardBKT := tx.Bucket(graveyardBucketName)
+func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes) error {
+ garbageBKT := tx.Bucket(garbageBucketName)
+ graveyardBKT := tx.Bucket(graveyardBucketName)
- var (
- // target bucket of the operation, one of the:
- // 1. Graveyard if Inhume was called with a Tombstone
- // 2. Garbage if Inhume was called with a GC mark
- bkt *bbolt.Bucket
- // value that will be put in the bucket, one of the:
- // 1. tombstone address if Inhume was called with
- // a Tombstone
- // 2. zeroValue if Inhume was called with a GC mark
- value []byte
- )
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
+ if err != nil {
+ return err
+ }
- if prm.tomb != nil {
- bkt = graveyardBKT
- tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
+ buf := make([]byte, addressKeySize)
+ for i := range prm.target {
+ if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil {
+ return err
+ }
+ }
- // it is forbidden to have a tomb-on-tomb in FrostFS,
- // so graveyard keys must not be addresses of tombstones
- data := bkt.Get(tombKey)
- if data != nil {
- err := bkt.Delete(tombKey)
- if err != nil {
- return fmt.Errorf("could not remove grave with tombstone key: %w", err)
- }
- }
+ return db.applyInhumeResToCounters(tx, res)
+}
- value = tombKey
- } else {
- bkt = garbageBKT
- value = zeroValue
+func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error {
+ id := addr.Object()
+ cnr := addr.Container()
+ tx := bkt.Tx()
+
+ // prevent locked objects from being inhumed
+ if !prm.forceRemoval && objectLocked(tx, cnr, id) {
+ return new(apistatus.ObjectLocked)
+ }
+
+ var lockWasChecked bool
+
+ // prevent LOCK objects from being inhumed
+ // if `Inhume` was called without the
+ // `WithForceGCMark` option
+ if !prm.forceRemoval {
+ if isLockObject(tx, cnr, id) {
+ return ErrLockObjectRemoval
}
- buf := make([]byte, addressKeySize)
- for i := range prm.target {
- id := prm.target[i].Object()
- cnr := prm.target[i].Container()
+ lockWasChecked = true
+ }
- // prevent locked objects to be inhumed
- if !prm.forceRemoval && objectLocked(tx, cnr, id) {
- return apistatus.ObjectLocked{}
- }
+ obj, err := db.get(tx, addr, buf, false, true, epoch)
+ targetKey := addressKey(addr, buf)
+ var ecErr *objectSDK.ECInfoError
+ if err == nil {
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
+ if err != nil {
+ return err
+ }
+ } else if errors.As(err, &ecErr) {
+ err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
+ if err != nil {
+ return err
+ }
+ }
- var lockWasChecked bool
+ if prm.tomb != nil {
+ var isTomb bool
+ isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
+ if err != nil {
+ return err
+ }
- // prevent lock objects to be inhumed
- // if `Inhume` was called not with the
- // `WithForceGCMark` option
- if !prm.forceRemoval {
- if isLockObject(tx, cnr, id) {
- return ErrLockObjectRemoval
- }
+ if isTomb {
+ return nil
+ }
+ }
- lockWasChecked = true
- }
+ // consider checking if target is already in graveyard?
+ err = bkt.Put(targetKey, value)
+ if err != nil {
+ return err
+ }
- obj, err := db.get(tx, prm.target[i], buf, false, true, currEpoch)
- targetKey := addressKey(prm.target[i], buf)
- if err == nil {
- containerID, _ := obj.ContainerID()
- if inGraveyardWithKey(targetKey, graveyardBKT, garbageBKT) == 0 {
- inhumed++
- res.storeDeletionInfo(containerID, obj.PayloadSize())
- }
+ if prm.lockObjectHandling {
+ // do not perform lock check if
+ // it was already called
+ if lockWasChecked {
+ // inhumed object is not of
+ // the LOCK type
+ return nil
+ }
- // if object is stored, and it is regular object then update bucket
- // with container size estimations
- if obj.Type() == object.TypeRegular {
- err := changeContainerSize(tx, cnr, obj.PayloadSize(), false)
- if err != nil {
- return err
- }
- }
- }
+ if isLockObject(tx, cnr, id) {
+ res.deletedLockObj = append(res.deletedLockObj, addr)
+ }
+ }
+ return nil
+}
- if prm.tomb != nil {
- targetIsTomb := false
-
- // iterate over graveyard and check if target address
- // is the address of tombstone in graveyard.
- err = bkt.ForEach(func(k, v []byte) error {
- // check if graveyard has record with key corresponding
- // to tombstone address (at least one)
- targetIsTomb = bytes.Equal(v, targetKey)
-
- if targetIsTomb {
- // break bucket iterator
- return errBreakBucketForEach
- }
-
- return nil
- })
- if err != nil && !errors.Is(err, errBreakBucketForEach) {
- return err
- }
-
- // do not add grave if target is a tombstone
- if targetIsTomb {
- continue
- }
-
- // if tombstone appears object must be
- // additionally marked with GC
- err = garbageBKT.Put(targetKey, zeroValue)
- if err != nil {
- return err
- }
- }
-
- // consider checking if target is already in graveyard?
- err = bkt.Put(targetKey, value)
+func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes,
+ garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket,
+ ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte,
+) error {
+ for _, chunk := range ecInfo.Chunks {
+ chunkBuf := make([]byte, addressKeySize)
+ var chunkAddr oid.Address
+ chunkAddr.SetContainer(cnr)
+ var chunkID oid.ID
+ err := chunkID.ReadFromV2(chunk.ID)
+ if err != nil {
+ return err
+ }
+ chunkAddr.SetObject(chunkID)
+ chunkObj, err := db.get(tx, chunkAddr, chunkBuf, false, true, epoch)
+ if err != nil {
+ return err
+ }
+ chunkKey := addressKey(chunkAddr, chunkBuf)
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, chunkKey, cnr, chunkObj, res)
+ if err != nil {
+ return err
+ }
+ if tomb != nil {
+ _, err = db.markAsGC(graveyardBKT, garbageBKT, chunkKey)
if err != nil {
return err
}
+ }
+ err = targetBucket.Put(chunkKey, value)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
- if prm.lockObjectHandling {
- // do not perform lock check if
- // it was already called
- if lockWasChecked {
- // inhumed object is not of
- // the LOCK type
- continue
- }
+func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
+ if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil {
+ return err
+ }
+ if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil {
+ return err
+ }
- if isLockObject(tx, cnr, id) {
- res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
- }
+ return db.updateContainerCounter(tx, res.inhumedByCnrID, false)
+}
+
+// getInhumeTargetBucketAndValue returns the target bucket that stores the
+// inhume result, together with the value to put into that bucket.
+//
+// The target bucket of the operation is one of:
+// 1. Graveyard, if Inhume was called with a Tombstone
+// 2. Garbage, if Inhume was called with a GC mark
+//
+// The value put in the bucket is one of:
+// 1. the tombstone address, if Inhume was called with a Tombstone
+// 2. zeroValue, if Inhume was called with a GC mark
+func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
+ if prm.tomb != nil {
+ targetBucket = graveyardBKT
+ tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
+
+ // it is forbidden to have a tomb-on-tomb in FrostFS,
+ // so graveyard keys must not be addresses of tombstones
+ data := targetBucket.Get(tombKey)
+ if data != nil {
+ err := targetBucket.Delete(tombKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
}
}
- return db.updateCounter(tx, logical, inhumed, false)
- })
-
- res.availableImhumed = inhumed
-
- return
+ value = tombKey
+ } else {
+ targetBucket = garbageBKT
+ value = zeroValue
+ }
+ return targetBucket, value, nil
+}
+
+func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) {
+ targetIsTomb := isTomb(graveyardBKT, addressKey)
+
+ // do not add grave if target is a tombstone
+ if targetIsTomb {
+ return true, nil
+ }
+
+ // if a tombstone appears, the object must be
+ // additionally marked with GC
+ return false, garbageBKT.Put(addressKey, zeroValue)
+}
+
+func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error {
+ containerID, _ := obj.ContainerID()
+ if inGraveyardWithKey(targetKey, graveyardBKT, garbageBKT) == 0 {
+ res.storeDeletionInfo(containerID, obj.PayloadSize(), IsUserObject(obj))
+ }
+
+ // if object is stored, and it is regular object then update bucket
+ // with container size estimations
+ if obj.Type() == objectSDK.TypeRegular {
+ err := changeContainerSize(tx, cnr, obj.PayloadSize(), false)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool {
+ targetIsTomb := false
+
+ // iterate over the graveyard and check if the target address
+ // is the address of a tombstone in the graveyard.
+ // The tombstone must have the same container ID as the key.
+ c := graveyardBucket.Cursor()
+ containerPrefix := addressKey[:cidSize]
+ for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() {
+ // check if graveyard has record with key corresponding
+ // to tombstone address (at least one)
+ targetIsTomb = bytes.Equal(v, addressKey)
+ if targetIsTomb {
+ break
+ }
+ }
+ return targetIsTomb
}
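
isTomb above replaces the old full-bucket ForEach with a cursor scan bounded by the key's container prefix, so only the graveyard records of one container are touched. A standalone sketch of the pattern, assuming only that keys begin with a fixed-size container prefix (imports: bytes, go.etcd.io/bbolt):

// prefixScan reports whether any value under keys sharing `prefix` equals
// target. bbolt keys are byte-sorted, so Seek plus a HasPrefix guard bounds
// the scan to the prefix range. Sketch only; not the patch's exact code.
func prefixScan(b *bbolt.Bucket, prefix, target []byte) bool {
	c := b.Cursor()
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		if bytes.Equal(v, target) {
			return true
		}
	}
	return false
}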
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
new file mode 100644
index 0000000000..1807132870
--- /dev/null
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -0,0 +1,114 @@
+package meta
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInhumeECObject(t *testing.T) {
+ t.Parallel()
+
+ db := New(
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{uint64(12)}),
+ )
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ ecChunk := oidtest.ID()
+ ecChunk2 := oidtest.ID()
+ ecParent := oidtest.ID()
+ tombstoneID := oidtest.ID()
+
+ chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetID(ecChunk)
+ chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
+ chunkObj.SetPayloadSize(uint64(5))
+ chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
+
+ chunkObj2 := testutil.GenerateObjectWithCID(cnr)
+ chunkObj2.SetID(ecChunk2)
+ chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObj2.SetPayloadSize(uint64(10))
+ chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 1, 3, []byte{}, 0))
+
+ // put object with EC
+
+ var prm PutPrm
+ prm.SetObject(chunkObj)
+ prm.SetStorageID([]byte("0/0"))
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetObject(chunkObj2)
+ _, err = db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ var ecChunkAddress oid.Address
+ ecChunkAddress.SetContainer(cnr)
+ ecChunkAddress.SetObject(ecChunk)
+
+ var ecParentAddress oid.Address
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ecParent)
+
+ var chunkObjectAddress oid.Address
+ chunkObjectAddress.SetContainer(cnr)
+ chunkObjectAddress.SetObject(ecChunk)
+
+ var getPrm GetPrm
+
+ getPrm.SetAddress(ecChunkAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.NoError(t, err)
+
+ var ecInfoError *objectSDK.ECInfoError
+ getPrm.SetAddress(ecParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, &ecInfoError)
+ require.True(t, len(ecInfoError.ECInfo().Chunks) == 2 &&
+ ecInfoError.ECInfo().Chunks[0].Index == 0 &&
+ ecInfoError.ECInfo().Chunks[0].Total == 3)
+
+ // inhume Chunk
+ var inhumePrm InhumePrm
+ var tombAddress oid.Address
+ inhumePrm.SetAddresses(chunkObjectAddress)
+ res, err := db.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+ require.True(t, len(res.deletionDetails) == 1)
+ require.True(t, res.deletionDetails[0].Size == 5)
+
+ // inhume EC parent (like Delete does)
+ tombAddress.SetContainer(cnr)
+ tombAddress.SetObject(tombstoneID)
+ inhumePrm.SetAddresses(ecParentAddress)
+ inhumePrm.SetTombstoneAddress(tombAddress)
+ res, err = db.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+ // Previously deleted chunk shouldn't be in the details, because it is marked as garbage
+ require.True(t, len(res.deletionDetails) == 1)
+ require.True(t, res.deletionDetails[0].Size == 10)
+
+ getPrm.SetAddress(ecParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+
+ getPrm.SetAddress(ecChunkAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+}
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 09b101ad8e..786d10396b 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -1,11 +1,15 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -13,76 +17,83 @@ import (
func TestDB_Inhume(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
- raw := generateObject(t)
- addAttribute(raw, "foo", "bar")
-
- tombstoneID := oidtest.Address()
+ raw := testutil.GenerateObject()
+ testutil.AddAttribute(raw, "foo", "bar")
err := putBig(db, raw)
require.NoError(t, err)
- err = metaInhume(db, object.AddressOf(raw), tombstoneID)
+ err = metaInhume(db, object.AddressOf(raw), oidtest.ID())
require.NoError(t, err)
_, err = metaExists(db, object.AddressOf(raw))
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
_, err = metaGet(db, object.AddressOf(raw), false)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
}
func TestInhumeTombOnTomb(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
var (
err error
+ cnr = cidtest.ID()
addr1 = oidtest.Address()
addr2 = oidtest.Address()
addr3 = oidtest.Address()
+ addr4 = oidtest.Address()
inhumePrm meta.InhumePrm
existsPrm meta.ExistsPrm
)
+ addr1.SetContainer(cnr)
+ addr2.SetContainer(cnr)
+ addr3.SetContainer(cnr)
+ addr4.SetContainer(cnr)
+
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(addr2)
// inhume addr1 via addr2
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
// addr1 should become inhumed {addr1:addr2}
- _, err = db.Exists(existsPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ _, err = db.Exists(context.Background(), existsPrm)
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
inhumePrm.SetAddresses(addr3)
inhumePrm.SetTombstoneAddress(addr1)
// try to inhume addr3 via addr1
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// record with {addr1:addr2} should be removed from graveyard
// as a tomb-on-tomb; metabase should return ObjectNotFound
// NOT ObjectAlreadyRemoved since that record has been removed
// from graveyard but addr1 is still marked with GC
- _, err = db.Exists(existsPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err = db.Exists(context.Background(), existsPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
existsPrm.SetAddress(addr3)
// addr3 should be inhumed {addr3: addr1}
- _, err = db.Exists(existsPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ _, err = db.Exists(context.Background(), existsPrm)
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
inhumePrm.SetAddresses(addr1)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ inhumePrm.SetTombstoneAddress(addr4)
// try to inhume addr1 (which is already a tombstone in graveyard)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
@@ -90,32 +101,36 @@ func TestInhumeTombOnTomb(t *testing.T) {
// record with addr1 key should not appear in graveyard
// (tomb can not be inhumed) but should be kept as object
// with GC mark
- _, err = db.Exists(existsPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err = db.Exists(context.Background(), existsPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
}
func TestInhumeLocked(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
locked := oidtest.Address()
- err := db.Lock(locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
+ err := db.Lock(context.Background(), locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
require.NoError(t, err)
var prm meta.InhumePrm
prm.SetAddresses(locked)
- _, err = db.Inhume(prm)
+ _, err = db.Inhume(context.Background(), prm)
- var e apistatus.ObjectLocked
+ var e *apistatus.ObjectLocked
require.ErrorAs(t, err, &e)
}
-func metaInhume(db *meta.DB, target, tomb oid.Address) error {
+func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error {
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(target)
- inhumePrm.SetTombstoneAddress(tomb)
+ var tombAddr oid.Address
+ tombAddr.SetContainer(target.Container())
+ tombAddr.SetObject(tomb)
+ inhumePrm.SetTombstoneAddress(tombAddr)
- _, err := db.Inhume(inhumePrm)
+ _, err := db.Inhume(context.Background(), inhumePrm)
return err
}
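
The test changes above track an SDK-side change in how status errors are matched: value types checked with errors.As give way to predicate helpers and pointer types. Both styles, side by side (calls as they appear in this patch):

// old: match a status error by value
var removedVal apistatus.ObjectAlreadyRemoved
_ = errors.As(err, &removedVal)

// new: a predicate helper for the common case ...
if client.IsErrObjectAlreadyRemoved(err) {
	// the object is covered by a tombstone
}

// ... and pointer types where errors.As is still used
var locked *apistatus.ObjectLocked
_ = errors.As(err, &locked)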
diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go
index 9741225df5..9cccd7dad5 100644
--- a/pkg/local_object_storage/metabase/iterators.go
+++ b/pkg/local_object_storage/metabase/iterators.go
@@ -1,27 +1,31 @@
package meta
import (
+ "context"
"errors"
- "fmt"
"strconv"
+ "time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ExpiredObject is a descriptor of expired object from DB.
type ExpiredObject struct {
- typ object.Type
+ typ objectSDK.Type
addr oid.Address
}
// Type returns type of the expired object.
-func (e *ExpiredObject) Type() object.Type {
+func (e *ExpiredObject) Type() objectSDK.Type {
return e.typ
}
@@ -43,7 +47,20 @@ var ErrInterruptIterator = logicerr.New("iterator is interrupted")
//
// If h returns ErrInterruptIterator, nil returns immediately.
// Returns other errors of h directly.
-func (db *DB) IterateExpired(epoch uint64, h ExpiredObjectHandler) error {
+func (db *DB) IterateExpired(ctx context.Context, epoch uint64, h ExpiredObjectHandler) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateExpired", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateExpired",
+ trace.WithAttributes(
+ attribute.String("epoch", strconv.FormatUint(epoch, 10)),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -51,129 +68,51 @@ func (db *DB) IterateExpired(epoch uint64, h ExpiredObjectHandler) error {
return ErrDegradedMode
}
- return db.boltDB.View(func(tx *bbolt.Tx) error {
+ err := metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
return db.iterateExpired(tx, epoch, h)
- })
+ }))
+ success = err == nil
+ return err
}
func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler) error {
- err := tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
- cidBytes := cidFromAttributeBucket(name, objectV2.SysAttributeExpEpoch)
- if cidBytes == nil {
- return nil
- }
-
- var cnrID cid.ID
- err := cnrID.Decode(cidBytes)
+ b := tx.Bucket(expEpochToObjectBucketName)
+ c := b.Cursor()
+ for k, _ := c.First(); k != nil; k, _ = c.Next() {
+ expiresAfter, cnr, obj, err := parseExpirationEpochKey(k)
if err != nil {
- return fmt.Errorf("could not parse container ID of expired bucket: %w", err)
- }
-
- return b.ForEach(func(expKey, _ []byte) error {
- bktExpired := b.Bucket(expKey)
- if bktExpired == nil {
- return nil
- }
-
- expiresAfter, err := strconv.ParseUint(string(expKey), 10, 64)
- if err != nil {
- return fmt.Errorf("could not parse expiration epoch: %w", err)
- } else if expiresAfter >= epoch {
- return nil
- }
-
- return bktExpired.ForEach(func(idKey, _ []byte) error {
- var id oid.ID
-
- err = id.Decode(idKey)
- if err != nil {
- return fmt.Errorf("could not parse ID of expired object: %w", err)
- }
-
- // Ignore locked objects.
- //
- // To slightly optimize performance we can check only REGULAR objects
- // (only they can be locked), but it's more reliable.
- if objectLocked(tx, cnrID, id) {
- return nil
- }
-
- var addr oid.Address
- addr.SetContainer(cnrID)
- addr.SetObject(id)
-
- return h(&ExpiredObject{
- typ: firstIrregularObjectType(tx, cnrID, idKey),
- addr: addr,
- })
- })
- })
- })
-
- if errors.Is(err, ErrInterruptIterator) {
- err = nil
- }
-
- return err
-}
-
-// IterateCoveredByTombstones iterates over all objects in DB which are covered
-// by tombstone with string address from tss. Locked objects are not included
-// (do not confuse with objects of type LOCK).
-//
-// If h returns ErrInterruptIterator, nil returns immediately.
-// Returns other errors of h directly.
-//
-// Does not modify tss.
-func (db *DB) IterateCoveredByTombstones(tss map[string]oid.Address, h func(oid.Address) error) error {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- return db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateCoveredByTombstones(tx, tss, h)
- })
-}
-
-func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error {
- bktGraveyard := tx.Bucket(graveyardBucketName)
-
- err := bktGraveyard.ForEach(func(k, v []byte) error {
- var addr oid.Address
- if err := decodeAddressFromKey(&addr, v); err != nil {
return err
}
- if _, ok := tss[addr.EncodeToString()]; ok {
- var addr oid.Address
-
- err := decodeAddressFromKey(&addr, k)
- if err != nil {
- return fmt.Errorf("could not parse address of the object under tombstone: %w", err)
- }
-
- if objectLocked(tx, addr.Container(), addr.Object()) {
- return nil
- }
-
- return h(addr)
+ // bucket keys are ordered by epoch; no need to continue the lookup
+ if expiresAfter >= epoch {
+ return nil
}
-
- return nil
- })
-
- if errors.Is(err, ErrInterruptIterator) {
- err = nil
+ if objectLocked(tx, cnr, obj) {
+ continue
+ }
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(obj)
+ objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
+ err = h(&ExpiredObject{
+ typ: firstIrregularObjectType(tx, cnr, objKey),
+ addr: addr,
+ })
+ if err == nil {
+ continue
+ }
+ if errors.Is(err, ErrInterruptIterator) {
+ return nil
+ }
+ return err
}
-
- return err
+ return nil
}
-func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID) error) error {
+func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error {
var cid cid.ID
var oid oid.ID
+ obj := objectSDK.New()
return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
b58CID, postfix := parseContainerIDWithPrefix(&cid, name)
@@ -183,7 +122,6 @@ func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID) error) error {
switch postfix {
case primaryPrefix,
- storageGroupPrefix,
lockersPrefix,
tombstonePrefix:
default:
@@ -191,8 +129,8 @@ func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID) error) error {
}
return b.ForEach(func(k, v []byte) error {
- if oid.Decode(k) == nil {
- return f(cid, oid)
+ if oid.Decode(k) == nil && obj.Unmarshal(v) == nil {
+ return f(cid, oid, obj)
}
return nil
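
The rewritten iterateExpired leans on one property of the new expEpochToObjectBucketName bucket: keys sort by expiration epoch first, so the scan can stop at the first key that has not yet expired. A compact sketch of the early-exit pattern; the 8-byte big-endian epoch prefix is an assumption here, since parseExpirationEpochKey is not shown in this patch (imports: encoding/binary, go.etcd.io/bbolt):

// scanExpired visits every entry that expired strictly before `epoch` and
// stops at the first non-expired key, relying on byte-sorted cursor order.
// Assumed key layout: 8-byte big-endian epoch | container ID | object ID.
func scanExpired(b *bbolt.Bucket, epoch uint64, visit func(k []byte) error) error {
	c := b.Cursor()
	for k, _ := c.First(); k != nil; k, _ = c.Next() {
		if binary.BigEndian.Uint64(k[:8]) >= epoch {
			return nil // keys are epoch-ordered: nothing later is expired
		}
		if err := visit(k); err != nil {
			return err
		}
	}
	return nil
}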
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 3c3d0ea50b..4c9579965e 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -1,13 +1,15 @@
package meta_test
import (
+ "context"
"strconv"
"testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -15,27 +17,27 @@ import (
func TestDB_IterateExpired(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const epoch = 13
- mAlive := map[object.Type]oid.Address{}
- mExpired := map[object.Type]oid.Address{}
+ mAlive := map[objectSDK.Type]oid.Address{}
+ mExpired := map[objectSDK.Type]oid.Address{}
- for _, typ := range []object.Type{
- object.TypeRegular,
- object.TypeTombstone,
- object.TypeStorageGroup,
- object.TypeLock,
+ for _, typ := range []objectSDK.Type{
+ objectSDK.TypeRegular,
+ objectSDK.TypeTombstone,
+ objectSDK.TypeLock,
} {
mAlive[typ] = putWithExpiration(t, db, typ, epoch)
mExpired[typ] = putWithExpiration(t, db, typ, epoch-1)
}
- expiredLocked := putWithExpiration(t, db, object.TypeRegular, epoch-1)
+ expiredLocked := putWithExpiration(t, db, objectSDK.TypeRegular, epoch-1)
- require.NoError(t, db.Lock(expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
+ require.NoError(t, db.Lock(context.Background(), expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
- err := db.IterateExpired(epoch, func(exp *meta.ExpiredObject) error {
+ err := db.IterateExpired(context.Background(), epoch, func(exp *meta.ExpiredObject) error {
if addr, ok := mAlive[exp.Type()]; ok {
require.NotEqual(t, addr, exp.Address())
}
@@ -55,68 +57,12 @@ func TestDB_IterateExpired(t *testing.T) {
require.Empty(t, mExpired)
}
-func putWithExpiration(t *testing.T, db *meta.DB, typ object.Type, expiresAt uint64) oid.Address {
- obj := generateObject(t)
+func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt uint64) oid.Address {
+ obj := testutil.GenerateObject()
obj.SetType(typ)
- addAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(expiresAt, 10))
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(expiresAt, 10))
require.NoError(t, putBig(db, obj))
return object2.AddressOf(obj)
}
-
-func TestDB_IterateCoveredByTombstones(t *testing.T) {
- db := newDB(t)
-
- ts := oidtest.Address()
- protected1 := oidtest.Address()
- protected2 := oidtest.Address()
- protectedLocked := oidtest.Address()
- garbage := oidtest.Address()
-
- var prm meta.InhumePrm
- var err error
-
- prm.SetAddresses(protected1, protected2, protectedLocked)
- prm.SetTombstoneAddress(ts)
-
- _, err = db.Inhume(prm)
- require.NoError(t, err)
-
- prm.SetAddresses(garbage)
- prm.SetGCMark()
-
- _, err = db.Inhume(prm)
- require.NoError(t, err)
-
- var handled []oid.Address
-
- tss := map[string]oid.Address{
- ts.EncodeToString(): ts,
- }
-
- err = db.IterateCoveredByTombstones(tss, func(addr oid.Address) error {
- handled = append(handled, addr)
- return nil
- })
- require.NoError(t, err)
-
- require.Len(t, handled, 3)
- require.Contains(t, handled, protected1)
- require.Contains(t, handled, protected2)
- require.Contains(t, handled, protectedLocked)
-
- err = db.Lock(protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
- require.NoError(t, err)
-
- handled = handled[:0]
-
- err = db.IterateCoveredByTombstones(tss, func(addr oid.Address) error {
- handled = append(handled, addr)
- return nil
- })
- require.NoError(t, err)
-
- require.Len(t, handled, 2)
- require.NotContains(t, handled, protectedLocked)
-}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 93b7efb993..2a0bd7f6a1 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -1,12 +1,20 @@
package meta
import (
+ "bytes"
+ "context"
+ "time"
+
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ErrEndOfListing is returned from object listing with cursor
@@ -40,12 +48,12 @@ func (l *ListPrm) SetCursor(cursor *Cursor) {
// ListRes contains values returned from ListWithCursor operation.
type ListRes struct {
- addrList []objectcore.AddressWithType
+ addrList []objectcore.Info
cursor *Cursor
}
// AddressList returns addresses selected by ListWithCursor operation.
-func (l ListRes) AddressList() []objectcore.AddressWithType {
+func (l ListRes) AddressList() []objectcore.Info {
return l.addrList
}
@@ -54,13 +62,52 @@ func (l ListRes) Cursor() *Cursor {
return l.cursor
}
+// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler function executed upon containers in db.
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
+ // Handler function executed upon objects in db.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
+// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
+}
+
// ListWithCursor lists physical objects available in metabase starting from
-// cursor. Includes objects of all types. Does not include inhumed objects.
+// cursor. Includes objects of all types. Does not include inhumed or expired
+// objects.
// Use cursor value from response for consecutive requests.
//
// Returns ErrEndOfListing if there are no more objects to return or count
-// parameter set to zero.
-func (db *DB) ListWithCursor(prm ListPrm) (res ListRes, err error) {
+// parameter is set to zero.
+func (db *DB) ListWithCursor(ctx context.Context, prm ListPrm) (res ListRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("ListWithCursor", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.ListWithCursor",
+ trace.WithAttributes(
+ attribute.Int("count", prm.count),
+ attribute.Bool("has_cursor", prm.cursor != nil),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -68,19 +115,20 @@ func (db *DB) ListWithCursor(prm ListPrm) (res ListRes, err error) {
return res, ErrDegradedMode
}
- result := make([]objectcore.AddressWithType, 0, prm.count)
+ result := make([]objectcore.Info, 0, prm.count)
err = db.boltDB.View(func(tx *bbolt.Tx) error {
res.addrList, res.cursor, err = db.listWithCursor(tx, result, prm.count, prm.cursor)
return err
})
-
- return res, err
+ success = err == nil
+ return res, metaerr.Wrap(err)
}
-func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.AddressWithType, count int, cursor *Cursor) ([]objectcore.AddressWithType, *Cursor, error) {
+func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, cursor *Cursor) ([]objectcore.Info, *Cursor, error) {
threshold := cursor == nil // threshold is a flag to ignore cursor
var bucketName []byte
+ var err error
c := tx.Cursor()
name, _ := c.First()
@@ -91,10 +139,11 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.AddressWithType,
var containerID cid.ID
var offset []byte
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
+ bc := newBucketCache()
- var rawAddr = make([]byte, cidSize, addressKeySize)
+ rawAddr := make([]byte, cidSize, addressKeySize)
+
+ currEpoch := db.epochState.CurrentEpoch()
loop:
for ; name != nil; name, _ = c.Next() {
@@ -103,17 +152,15 @@ loop:
continue
}
- var objType object.Type
+ var objType objectSDK.Type
switch prefix {
case primaryPrefix:
- objType = object.TypeRegular
- case storageGroupPrefix:
- objType = object.TypeStorageGroup
+ objType = objectSDK.TypeRegular
case lockersPrefix:
- objType = object.TypeLock
+ objType = objectSDK.TypeLock
case tombstonePrefix:
- objType = object.TypeTombstone
+ objType = objectSDK.TypeTombstone
default:
continue
}
@@ -121,8 +168,11 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
- result, offset, cursor = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
- result, count, cursor, threshold)
+ result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
+ result, count, cursor, threshold, currEpoch)
+ if err != nil {
+ return nil, nil, err
+ }
}
bucketName = name
if len(result) >= count {
@@ -137,8 +187,7 @@ loop:
if offset != nil {
// new slice is much faster but less memory efficient
// we need to copy, because offset exists during bbolt tx
- cursor.inBucketOffset = make([]byte, len(offset))
- copy(cursor.inBucketOffset, offset)
+ cursor.inBucketOffset = bytes.Clone(offset)
}
if len(result) == 0 {
@@ -147,41 +196,41 @@ loop:
// new slice is much faster but less memory efficient
// we need to copy, because bucketName exists during bbolt tx
- cursor.bucketName = make([]byte, len(bucketName))
- copy(cursor.bucketName, bucketName)
+ cursor.bucketName = bytes.Clone(bucketName)
return result, cursor, nil
}
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
-func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
- objType object.Type, // type of the objects stored in the main bucket
- graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
+func selectNFromBucket(
+ bc *bucketCache,
+ bkt *bbolt.Bucket, // main bucket
+ objType objectSDK.Type, // type of the objects stored in the main bucket
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
- to []objectcore.AddressWithType, // listing result
+ to []objectcore.Info, // listing result
limit int, // stop listing at `limit` items in result
cursor *Cursor, // start from cursor object
threshold bool, // ignore cursor and start immediately
-) ([]objectcore.AddressWithType, []byte, *Cursor) {
+ currEpoch uint64,
+) ([]objectcore.Info, []byte, *Cursor, error) {
if cursor == nil {
cursor = new(Cursor)
}
- count := len(to)
c := bkt.Cursor()
- k, _ := c.First()
+ k, v := c.First()
offset := cursor.inBucketOffset
if !threshold {
c.Seek(offset)
- k, _ = c.Next() // we are looking for objects _after_ the cursor
+ k, v = c.Next() // we are looking for objects _after_ the cursor
}
- for ; k != nil; k, _ = c.Next() {
- if count >= limit {
+ for ; k != nil; k, v = c.Next() {
+ if len(to) >= limit {
break
}
@@ -191,18 +240,43 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
}
offset = k
+ graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
+ garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return nil, nil, nil, err
+ }
+
+ expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
+ if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
+ continue
+ }
+
+ var isLinkingObj bool
+ var ecInfo *objectcore.ECInfo
+ if objType == objectSDK.TypeRegular {
+ isLinkingObj = isLinkObject(&o)
+ ecHeader := o.ECHeader()
+ if ecHeader != nil {
+ ecInfo = &objectcore.ECInfo{
+ ParentID: ecHeader.Parent(),
+ Index: ecHeader.Index(),
+ Total: ecHeader.Total(),
+ }
+ }
+ }
+
var a oid.Address
a.SetContainer(cnt)
a.SetObject(obj)
- to = append(to, objectcore.AddressWithType{Address: a, Type: objType})
- count++
+ to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
}
- return to, offset, cursor
+ return to, offset, cursor, nil
}
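
Condensed, the new filter in selectNFromBucket skips an object exactly when its expiration epoch has passed and no lock protects it. A one-function restatement (helper names as in the patch; their exact signatures are assumed):

// skippedAsExpired mirrors the listing check: objects with a past expiration
// epoch are omitted unless a lock keeps them alive.
func skippedAsExpired(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID, id oid.ID, o *objectSDK.Object, currEpoch uint64) bool {
	exp, ok := hasExpirationEpoch(o)
	return ok && exp < currEpoch && !objectLockedWithCache(bc, tx, cnr, id)
}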
func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte) {
@@ -218,3 +292,211 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte)
return rawID, name[0]
}
+
+// IterateOverContainers iterates over containers physically present in the metabase, starting from the first one.
+func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverContainers(ctx, tx, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error {
+ var containerID cid.ID
+ for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} {
+ c := tx.Cursor()
+ for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() {
+ cidRaw, _ := parseContainerIDWithPrefix(&containerID, name)
+ if cidRaw == nil {
+ continue
+ }
+ var cnt cid.ID
+ copy(cnt[:], containerID[:])
+ var objType objectSDK.Type
+ switch prefix[0] {
+ case primaryPrefix:
+ objType = objectSDK.TypeRegular
+ case lockersPrefix:
+ objType = objectSDK.TypeLock
+ case tombstonePrefix:
+ objType = objectSDK.TypeTombstone
+ default:
+ continue
+ }
+ err := prm.Handler(ctx, objType, cnt)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer iterates over physical objects available in the metabase, starting from the first one.
+func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverObjectsInContainer(ctx, tx, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error {
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
+ return nil
+ }
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
+
+ bkt := tx.Bucket(bucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, v := c.First()
+
+ for ; k != nil; k, v = c.Next() {
+ var obj oid.ID
+ if err := obj.Decode(k); err != nil {
+ break
+ }
+
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+
+ var isLinkingObj bool
+ var ecInfo *objectcore.ECInfo
+ if prm.ObjectType == objectSDK.TypeRegular {
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return err
+ }
+ isLinkingObj = isLinkObject(&o)
+ ecHeader := o.ECHeader()
+ if ecHeader != nil {
+ ecInfo = &objectcore.ECInfo{
+ ParentID: ecHeader.Parent(),
+ Index: ecHeader.Index(),
+ Total: ecHeader.Total(),
+ }
+ }
+ }
+
+ var a oid.Address
+ a.SetContainer(prm.ContainerID)
+ a.SetObject(obj)
+ objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
+ err := prm.Handler(ctx, &objInfo)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CountAliveObjectsInContainer counts objects in the bucket that are neither in the graveyard nor in the garbage.
+func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
+ return 0, nil
+ }
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
+ var count uint64
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(bucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, _ := c.First()
+ for ; k != nil; k, _ = c.Next() {
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+ count++
+ }
+ return nil
+ })
+ success = err == nil
+ return count, metaerr.Wrap(err)
+}
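
The three new entry points compose naturally: enumerate containers, then walk or count objects per container. A hypothetical driver from outside the package (db and ctx are assumed; imports as in the surrounding tests):

var cPrm meta.IterateOverContainersPrm
cPrm.Handler = func(ctx context.Context, typ objectSDK.Type, id cid.ID) error {
	var oPrm meta.IterateOverObjectsInContainerPrm
	oPrm.ObjectType = typ
	oPrm.ContainerID = id
	oPrm.Handler = func(ctx context.Context, info *objectcore.Info) error {
		fmt.Println(info.Address, info.Type) // e.g. log every alive object
		return nil
	}
	return db.IterateOverObjectsInContainer(ctx, oPrm)
}
if err := db.IterateOverContainers(ctx, cPrm); err != nil {
	// handle the iteration error
}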
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index d18f101ad9..02985991cb 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -1,14 +1,19 @@
package meta_test
import (
+ "context"
"errors"
- "sort"
+ "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
@@ -16,6 +21,8 @@ import (
func BenchmarkListWithCursor(b *testing.B) {
db := listWithCursorPrepareDB(b)
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
+
b.Run("1 item", func(b *testing.B) {
benchmarkListWithCursor(b, db, 1)
})
@@ -32,8 +39,8 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
NoSync: true,
})) // faster single-thread generation
- obj := generateObject(b)
- for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes
+ obj := testutil.GenerateObject()
+ for i := range 100_000 { // should be a multiple of all batch sizes
obj.SetID(oidtest.ID())
if i%9 == 0 { // let's have 9 objects per container
obj.SetContainerID(cidtest.ID())
@@ -49,10 +56,10 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- res, err := db.ListWithCursor(prm)
+ for range b.N {
+ res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
- if err != meta.ErrEndOfListing {
+ if !errors.Is(err, meta.ErrEndOfListing) {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)
@@ -65,75 +72,91 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
}
func TestLisObjectsWithCursor(t *testing.T) {
- db := newDB(t)
+ t.Parallel()
const (
+ currEpoch = 100
+ expEpoch = currEpoch - 1
containers = 5
- total = containers * 5 // regular + ts + sg + child + lock
+ total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
)
- expected := make([]object.AddressWithType, 0, total)
+ db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ expected := make([]object.Info, 0, total)
// fill metabase with objects
- for i := 0; i < containers; i++ {
+ for range containers {
containerID := cidtest.ID()
// add one regular object
- obj := generateObjectWithCID(t, containerID)
+ obj := testutil.GenerateObjectWithCID(containerID)
obj.SetType(objectSDK.TypeRegular)
err := putBig(db, obj)
require.NoError(t, err)
- expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
// add one tombstone
- obj = generateObjectWithCID(t, containerID)
+ obj = testutil.GenerateObjectWithCID(containerID)
obj.SetType(objectSDK.TypeTombstone)
err = putBig(db, obj)
require.NoError(t, err)
- expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeTombstone})
-
- // add one storage group
- obj = generateObjectWithCID(t, containerID)
- obj.SetType(objectSDK.TypeStorageGroup)
- err = putBig(db, obj)
- require.NoError(t, err)
- expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeStorageGroup})
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeTombstone})
// add one lock
- obj = generateObjectWithCID(t, containerID)
+ obj = testutil.GenerateObjectWithCID(containerID)
obj.SetType(objectSDK.TypeLock)
err = putBig(db, obj)
require.NoError(t, err)
- expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeLock})
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeLock})
// add one inhumed (do not include into expected)
- obj = generateObjectWithCID(t, containerID)
+ obj = testutil.GenerateObjectWithCID(containerID)
obj.SetType(objectSDK.TypeRegular)
err = putBig(db, obj)
require.NoError(t, err)
- ts := generateObjectWithCID(t, containerID)
- err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts))
+ ts := testutil.GenerateObjectWithCID(containerID)
+ err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object())
require.NoError(t, err)
// add one child object (do not include parent into expected)
splitID := objectSDK.NewSplitID()
- parent := generateObjectWithCID(t, containerID)
- addAttribute(parent, "foo", "bar")
- child := generateObjectWithCID(t, containerID)
+ parent := testutil.GenerateObjectWithCID(containerID)
+ testutil.AddAttribute(parent, "foo", "bar")
+ child := testutil.GenerateObjectWithCID(containerID)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
child.SetSplitID(splitID)
err = putBig(db, child)
require.NoError(t, err)
- expected = append(expected, object.AddressWithType{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
- }
+ expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
- expected = sortAddresses(expected)
+ // add expired object (do not include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+
+ // add non-expired object (include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
+
+ // add locked expired object (include into expected)
+ obj = testutil.GenerateObjectWithCID(containerID)
+ objID := oidtest.ID()
+ obj.SetID(objID)
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
+ require.NoError(t, metaPut(db, obj, nil))
+ require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
+ expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
+ }
t.Run("success with various count", func(t *testing.T) {
for countPerReq := 1; countPerReq <= total; countPerReq++ {
- got := make([]object.AddressWithType, 0, total)
+ got := make([]object.Info, 0, total)
res, cursor, err := metaListWithCursor(db, uint32(countPerReq), nil)
require.NoError(t, err, "count:%d", countPerReq)
@@ -144,7 +167,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
expectedIterations--
}
- for i := 0; i < expectedIterations; i++ {
+ for range expectedIterations {
res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor)
require.NoError(t, err, "count:%d", countPerReq)
got = append(got, res...)
@@ -152,9 +175,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
_, _, err = metaListWithCursor(db, uint32(countPerReq), cursor)
require.ErrorIs(t, err, meta.ErrEndOfListing, "count:%d", countPerReq, cursor)
-
- got = sortAddresses(got)
- require.Equal(t, expected, got, "count:%d", countPerReq)
+ require.ElementsMatch(t, expected, got, "count:%d", countPerReq)
}
})
@@ -165,15 +186,18 @@ func TestLisObjectsWithCursor(t *testing.T) {
}
func TestAddObjectDuringListingWithCursor(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
const total = 5
expected := make(map[string]int, total)
// fill metabase with objects
- for i := 0; i < total; i++ {
- obj := generateObject(t)
+ for range total {
+ obj := testutil.GenerateObject()
err := putBig(db, obj)
require.NoError(t, err)
expected[object.AddressOf(obj).EncodeToString()] = 0
@@ -189,8 +213,8 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
}
// add new objects
- for i := 0; i < total; i++ {
- obj := generateObject(t)
+ for range total {
+ obj := testutil.GenerateObject()
err = putBig(db, obj)
require.NoError(t, err)
}
@@ -212,21 +236,69 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
for _, v := range expected {
require.Equal(t, 1, v)
}
-
}
-func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithType {
- sort.Slice(addrWithType, func(i, j int) bool {
- return addrWithType[i].Address.EncodeToString() < addrWithType[j].Address.EncodeToString()
- })
- return addrWithType
-}
-
-func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]object.AddressWithType, *meta.Cursor, error) {
+func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]object.Info, *meta.Cursor, error) {
var listPrm meta.ListPrm
listPrm.SetCount(count)
listPrm.SetCursor(cursor)
- r, err := db.ListWithCursor(listPrm)
+ r, err := db.ListWithCursor(context.Background(), listPrm)
return r.AddressList(), r.Cursor(), err
}
+
+func TestIterateOver(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ const total uint64 = 5
+ for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
+ var expected []*objectSDK.Object
+ // fill metabase with objects
+ cid := cidtest.ID()
+ for range total {
+ obj := testutil.GenerateObjectWithCID(cid)
+ obj.SetType(typ)
+ err := metaPut(db, obj, nil)
+ require.NoError(t, err)
+ expected = append(expected, obj)
+ }
+
+ var metaIter meta.IterateOverObjectsInContainerPrm
+ var count uint64
+ metaIter.Handler = func(context.Context, *object.Info) error {
+ count++
+ return nil
+ }
+ metaIter.ContainerID = cid
+ metaIter.ObjectType = typ
+ err := db.IterateOverObjectsInContainer(context.Background(), metaIter)
+ require.NoError(t, err)
+ require.Equal(t, total, count)
+
+ var metaCount meta.CountAliveObjectsInContainerPrm
+ metaCount.ContainerID = cid
+ metaCount.ObjectType = typ
+ res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, total, res)
+
+ err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1]))
+ require.NoError(t, err)
+
+ res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), res)
+ }
+ var count int
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error {
+ count++
+ return nil
+ }
+ err := db.IterateOverContainers(context.Background(), metaPrm)
+ require.NoError(t, err)
+ require.Equal(t, 3, count)
+}
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index 0ecd8cfee6..f4cb9e53bc 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -2,18 +2,31 @@ package meta
import (
"bytes"
+ "context"
"fmt"
+ "slices"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
var bucketNameLocked = []byte{lockedPrefix}
+type keyValue struct {
+ Key []byte
+ Value []byte
+}
+
// returns name of the bucket with objects of type LOCK for specified container.
func bucketNameLockers(idCnr cid.ID, key []byte) []byte {
return bucketName(idCnr, lockersPrefix, key)
@@ -25,7 +38,23 @@ func bucketNameLockers(idCnr cid.ID, key []byte) []byte {
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
-func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.ID) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Lock", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Lock",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("locker", locker.EncodeToString()),
+ attribute.Int("locked_count", len(locked)),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -35,20 +64,23 @@ func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
return ErrReadOnlyMode
}
- if len(locked) == 0 {
- panic("empty locked list")
- }
+ assert.False(len(locked) == 0, "empty locked list")
- // check if all objects are regular
+ err := db.lockInternal(locked, cnr, locker)
+ success = err == nil
+ return err
+}
+
+func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error {
bucketKeysLocked := make([][]byte, len(locked))
for i := range locked {
bucketKeysLocked[i] = objectKey(locked[i], make([]byte, objectKeySize))
}
key := make([]byte, cidSize)
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
- if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != object.TypeRegular {
- return logicerr.Wrap(apistatus.LockNonRegularObject{})
+ return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular {
+ return logicerr.Wrap(new(apistatus.LockNonRegularObject))
}
bucketLocked := tx.Bucket(bucketNameLocked)
@@ -65,7 +97,6 @@ func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
loop:
for i := range bucketKeysLocked {
- // decode list of already existing lockers
exLockers, err = decodeList(bucketLockedContainer.Get(bucketKeysLocked[i]))
if err != nil {
return fmt.Errorf("decode list of object lockers: %w", err)
@@ -77,14 +108,11 @@ func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
}
}
- // update the list of lockers
updLockers, err = encodeList(append(exLockers, keyLocker))
if err != nil {
- // maybe continue for the best effort?
return fmt.Errorf("encode list of object lockers: %w", err)
}
- // write updated list of lockers
err = bucketLockedContainer.Put(bucketKeysLocked[i], updLockers)
if err != nil {
return fmt.Errorf("update list of object lockers: %w", err)
@@ -92,35 +120,53 @@ func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
}
return nil
- })
+ }))
}
// FreeLockedBy unlocks all objects in DB which are locked by lockers.
-func (db *DB) FreeLockedBy(lockers []oid.Address) error {
+// Returns the slice of unlocked object IDs or an error.
+func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("FreeLockedBy", time.Since(startedAt), success)
+ }()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
- return ErrDegradedMode
+ return nil, ErrDegradedMode
}
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
- var err error
+ var unlockedObjects []oid.Address
+ if err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
for i := range lockers {
- err = freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object())
+ unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object())
if err != nil {
return err
}
+ unlockedObjects = append(unlockedObjects, unlocked...)
}
- return err
- })
+ return nil
+ }); err != nil {
+ return nil, metaerr.Wrap(err)
+ }
+ success = true
+ return unlockedObjects, nil
}
// checks if specified object is locked in the specified container.
func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- bucketLocked := tx.Bucket(bucketNameLocked)
+ return objectLockedWithCache(nil, tx, idCnr, idObj)
+}
+
+func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
+ bucketLocked := getLockedBucket(bc, tx)
if bucketLocked != nil {
key := make([]byte, cidSize)
idCnr.Encode(key)
@@ -133,59 +179,125 @@ func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
return false
}
-// releases all records about the objects locked by the locker.
-//
-// Operation is very resource-intensive, which is caused by the admissibility
-// of multiple locks. Also, if we knew what objects are locked, it would be
-// possible to speed up the execution.
-func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) error {
+// getLocks returns the `LOCK` object IDs if the specified object is locked in the specified container.
+func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
+ var lockers []oid.ID
bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
key := make([]byte, cidSize)
idCnr.Encode(key)
-
bucketLockedContainer := bucketLocked.Bucket(key)
if bucketLockedContainer != nil {
- keyLocker := objectKey(locker, key)
- return bucketLockedContainer.ForEach(func(k, v []byte) error {
- keyLockers, err := decodeList(v)
- if err != nil {
- return fmt.Errorf("decode list of lockers in locked bucket: %w", err)
+ binObjIDs, err := decodeList(bucketLockedContainer.Get(objectKey(idObj, key)))
+ if err != nil {
+ return nil, fmt.Errorf("decode list of object lockers: %w", err)
+ }
+ for _, binObjID := range binObjIDs {
+ var id oid.ID
+ if err = id.Decode(binObjID); err != nil {
+ return nil, err
}
+ lockers = append(lockers, id)
+ }
+ }
+ }
+ return lockers, nil
+}
- for i := range keyLockers {
- if bytes.Equal(keyLockers[i], keyLocker) {
- if len(keyLockers) == 1 {
- // locker was all alone
- err = bucketLockedContainer.Delete(k)
- if err != nil {
- return fmt.Errorf("delete locked object record from locked bucket: %w", err)
- }
- } else {
- // exclude locker
- keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
+// releases all records about the objects locked by the locker.
+// Returns the slice of unlocked object IDs or an error.
+//
+// Operation is very resource-intensive, which is caused by the admissibility
+// of multiple locks. Also, if we knew what objects are locked, it would be
+// possible to speed up the execution.
+func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Address, error) {
+ var unlockedObjects []oid.Address
+ bucketLocked := tx.Bucket(bucketNameLocked)
+ if bucketLocked == nil {
+ return unlockedObjects, nil
+ }
- v, err = encodeList(keyLockers)
- if err != nil {
- return fmt.Errorf("encode updated list of lockers: %w", err)
- }
+ key := make([]byte, cidSize)
+ idCnr.Encode(key)
- // update the record
- err = bucketLockedContainer.Put(k, v)
- if err != nil {
- return fmt.Errorf("update list of lockers: %w", err)
- }
- }
+ bucketLockedContainer := bucketLocked.Bucket(key)
+ if bucketLockedContainer == nil {
+ return unlockedObjects, nil
+ }
- return nil
+ keyLocker := objectKey(locker, key)
+ updates := make([]keyValue, 0)
+ err := bucketLockedContainer.ForEach(func(k, v []byte) error {
+ keyLockers, err := decodeList(v)
+ if err != nil {
+ return fmt.Errorf("decode list of lockers in locked bucket: %w", err)
+ }
+
+ for i := range keyLockers {
+ if bytes.Equal(keyLockers[i], keyLocker) {
+ if len(keyLockers) == 1 {
+ updates = append(updates, keyValue{
+ Key: k,
+ Value: nil,
+ })
+
+ var id oid.ID
+ err = id.Decode(k)
+ if err != nil {
+ return fmt.Errorf("decode unlocked object id error: %w", err)
}
+
+ var addr oid.Address
+ addr.SetContainer(idCnr)
+ addr.SetObject(id)
+
+ unlockedObjects = append(unlockedObjects, addr)
+ } else {
+ // exclude locker
+ keyLockers = slices.Delete(keyLockers, i, i+1)
+
+ v, err = encodeList(keyLockers)
+ if err != nil {
+ return fmt.Errorf("encode updated list of lockers: %w", err)
+ }
+
+ updates = append(updates, keyValue{
+ Key: k,
+ Value: v,
+ })
}
return nil
- })
+ }
}
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
}
+ if err = applyBucketUpdates(bucketLockedContainer, updates); err != nil {
+ return nil, err
+ }
+
+ return unlockedObjects, nil
+}
+
+func applyBucketUpdates(bucket *bbolt.Bucket, updates []keyValue) error {
+ for _, update := range updates {
+ if update.Value == nil {
+ err := bucket.Delete(update.Key)
+ if err != nil {
+ return fmt.Errorf("delete locked object record from locked bucket: %w", err)
+ }
+ } else {
+ err := bucket.Put(update.Key, update.Value)
+ if err != nil {
+ return fmt.Errorf("update list of lockers: %w", err)
+ }
+ }
+ }
return nil
}
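
A note on the shape of this refactor: bbolt requires that the callback passed to Bucket.ForEach not modify the bucket, so freePotentialLocks records its intended writes in the updates slice during the walk and applyBucketUpdates replays them once iteration has finished. Schematically (bkt stands for any *bbolt.Bucket):

// Collect-then-apply: never mutate a bucket from inside its own ForEach.
var updates []keyValue
_ = bkt.ForEach(func(k, v []byte) error {
	updates = append(updates, keyValue{Key: k, Value: nil}) // nil value means delete
	return nil
})
// Safe now: the cursor-backed iteration is over.
if err := applyBucketUpdates(bkt, updates); err != nil {
	return err
}
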
@@ -214,16 +326,64 @@ func (i IsLockedRes) Locked() bool {
// object is considered as non-locked.
//
// Returns only non-logical errors related to underlying database.
-func (db *DB) IsLocked(prm IsLockedPrm) (res IsLockedRes, err error) {
+func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IsLocked", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IsLocked",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
return res, ErrDegradedMode
}
-
- return res, db.boltDB.View(func(tx *bbolt.Tx) error {
+ err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
res.locked = objectLocked(tx, prm.addr.Container(), prm.addr.Object())
return nil
- })
+ }))
+ success = err == nil
+ return res, err
+}
+
+// GetLocks returns the `LOCK` object IDs if the provided object is locked by any `LOCK`.
+// An object that is not found is considered non-locked.
+//
+// Returns only non-logical errors related to underlying database.
+func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return res, ErrDegradedMode
+ }
+ err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
+ res, err = getLocks(tx, addr.Container(), addr.Object())
+ return nil
+ }))
+ success = err == nil
+ return res, err
}
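
Taken together, the new return value of FreeLockedBy and the GetLocks query give callers a precise post-removal picture. A usage sketch, assuming a ready *meta.DB, a context ctx, and the addresses of removed LOCK objects in lockers; scheduleGC is a hypothetical follow-up step:

// Release every lock record held by the removed LOCK objects.
unlocked, err := db.FreeLockedBy(lockers)
if err != nil {
	return err
}
// Addresses in unlocked lost their last locker; objects still held by
// other lockers are intentionally not reported.
for _, addr := range unlocked {
	ids, err := db.GetLocks(ctx, addr) // expected to be empty at this point
	if err != nil {
		return err
	}
	if len(ids) == 0 {
		scheduleGC(addr) // hypothetical: hand the object over to GC
	}
}
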
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 9cfa9c5bc8..341ff9ad18 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -1,13 +1,15 @@
package meta_test
import (
+ "context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
@@ -15,20 +17,22 @@ import (
)
func TestDB_Lock(t *testing.T) {
+ t.Parallel()
+
cnr := cidtest.ID()
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("empty locked list", func(t *testing.T) {
- require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, nil) })
- require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, []oid.ID{}) })
+ require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
+ require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, []oid.ID{}) })
})
t.Run("(ir)regular", func(t *testing.T) {
- for _, typ := range [...]object.Type{
- object.TypeTombstone,
- object.TypeStorageGroup,
- object.TypeLock,
- object.TypeRegular,
+ for _, typ := range [...]objectSDK.Type{
+ objectSDK.TypeTombstone,
+ objectSDK.TypeLock,
+ objectSDK.TypeRegular,
} {
obj := objecttest.Object()
obj.SetType(typ)
@@ -38,13 +42,13 @@ func TestDB_Lock(t *testing.T) {
err := metaPut(db, obj, nil)
require.NoError(t, err, typ)
- var e apistatus.LockNonRegularObject
+ var e *apistatus.LockNonRegularObject
id, _ := obj.ID()
// try to lock it
- err = db.Lock(cnr, oidtest.ID(), []oid.ID{id})
- if typ == object.TypeRegular {
+ err = db.Lock(context.Background(), cnr, oidtest.ID(), []oid.ID{id})
+ if typ == objectSDK.TypeRegular {
require.NoError(t, err, typ)
} else {
require.ErrorAs(t, err, &e, typ)
@@ -63,29 +67,35 @@ func TestDB_Lock(t *testing.T) {
// check locking relation
- inhumePrm.SetAddresses(objAddr)
- _, err := db.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ var objLockedErr *apistatus.ObjectLocked
- inhumePrm.SetTombstoneAddress(oidtest.Address())
- _, err = db.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ inhumePrm.SetAddresses(objAddr)
+ _, err := db.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
+
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
+ _, err = db.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
// try to remove lock object
inhumePrm.SetAddresses(lockAddr)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
// check that locking relation has not been
// dropped
inhumePrm.SetAddresses(objAddr)
- _, err = db.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ _, err = db.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
- _, err = db.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
+ _, err = db.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
})
t.Run("lock-unlock scenario", func(t *testing.T) {
@@ -94,9 +104,11 @@ func TestDB_Lock(t *testing.T) {
objAddr := objectcore.AddressOf(objs[0])
lockAddr := objectcore.AddressOf(lockObj)
+ var objLockedErr *apistatus.ObjectLocked
+
// try to inhume locked object using tombstone
- err := metaInhume(db, objAddr, lockAddr)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ err := metaInhume(db, objAddr, lockAddr.Object())
+ require.ErrorAs(t, err, &objLockedErr)
// free locked object
var inhumePrm meta.InhumePrm
@@ -104,19 +116,19 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetLockObjectHandling()
- res, err := db.Inhume(inhumePrm)
+ res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
- err = db.FreeLockedBy([]oid.Address{lockAddr})
+ _, err = db.FreeLockedBy([]oid.Address{lockAddr})
require.NoError(t, err)
inhumePrm.SetAddresses(objAddr)
inhumePrm.SetGCMark()
// now we can inhume the object
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
})
@@ -133,24 +145,24 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
inhumePrm.SetLockObjectHandling()
- res, err := db.Inhume(inhumePrm)
+ res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
// unlock just objects that were locked by
// just removed locker
- err = db.FreeLockedBy([]oid.Address{res.DeletedLockObjects()[0]})
+ _, err = db.FreeLockedBy([]oid.Address{res.DeletedLockObjects()[0]})
require.NoError(t, err)
// removing objects after unlock
inhumePrm.SetGCMark()
- for i := 0; i < objsNum; i++ {
+ for i := range objsNum {
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
- res, err = db.Inhume(inhumePrm)
+ res, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
}
@@ -163,19 +175,22 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
- res, err := db.Inhume(inhumePrm)
+ res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
})
}
func TestDB_Lock_Expired(t *testing.T) {
+ t.Parallel()
+
es := &epochState{e: 123}
db := newDB(t, meta.WithEpochState(es))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// put an object
- addr := putWithExpiration(t, db, object.TypeRegular, 124)
+ addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)
// expire the obj
es.e = 125
@@ -183,7 +198,7 @@ func TestDB_Lock_Expired(t *testing.T) {
require.ErrorIs(t, err, meta.ErrObjectIsExpired)
// lock the obj
- require.NoError(t, db.Lock(addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
+ require.NoError(t, db.Lock(context.Background(), addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
// object is expired but locked, thus, must be available
_, err = metaGet(db, addr, false)
@@ -191,7 +206,10 @@ func TestDB_Lock_Expired(t *testing.T) {
}
func TestDB_IsLocked(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
// existing and locked objs
@@ -201,7 +219,7 @@ func TestDB_IsLocked(t *testing.T) {
for _, obj := range objs {
prm.SetAddress(objectcore.AddressOf(obj))
- res, err := db.IsLocked(prm)
+ res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Locked())
@@ -211,7 +229,7 @@ func TestDB_IsLocked(t *testing.T) {
prm.SetAddress(oidtest.Address())
- res, err := db.IsLocked(prm)
+ res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
@@ -223,26 +241,26 @@ func TestDB_IsLocked(t *testing.T) {
var putPrm meta.PutPrm
putPrm.SetObject(obj)
- _, err = db.Put(putPrm)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
prm.SetAddress(objectcore.AddressOf(obj))
- res, err = db.IsLocked(prm)
+ res, err = db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
}
// putAndLockObj puts object, returns it and its locker.
-func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*object.Object, *object.Object) {
+func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK.Object, *objectSDK.Object) {
cnr := cidtest.ID()
- lockedObjs := make([]*object.Object, 0, numOfLockedObjs)
+ lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs)
lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs)
- for i := 0; i < numOfLockedObjs; i++ {
- obj := generateObjectWithCID(t, cnr)
+ for range numOfLockedObjs {
+ obj := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, obj)
require.NoError(t, err)
@@ -252,14 +270,14 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*object.Ob
lockedObjIDs = append(lockedObjIDs, id)
}
- lockObj := generateObjectWithCID(t, cnr)
+ lockObj := testutil.GenerateObjectWithCID(cnr)
lockID, _ := lockObj.ID()
- lockObj.SetType(object.TypeLock)
+ lockObj.SetType(objectSDK.TypeLock)
err := putBig(db, lockObj)
require.NoError(t, err)
- err = db.Lock(cnr, lockID, lockedObjIDs)
+ err = db.Lock(context.Background(), cnr, lockID, lockedObjIDs)
require.NoError(t, err)
return lockedObjs, lockObj
diff --git a/pkg/local_object_storage/metabase/metrics.go b/pkg/local_object_storage/metabase/metrics.go
new file mode 100644
index 0000000000..d673560c77
--- /dev/null
+++ b/pkg/local_object_storage/metabase/metrics.go
@@ -0,0 +1,23 @@
+package meta
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+type Metrics interface {
+ SetParentID(parentID string)
+
+ SetMode(m mode.ComponentMode)
+ Close()
+
+ AddMethodDuration(method string, d time.Duration, success bool)
+}
+
+type noopMetrics struct{}
+
+func (m *noopMetrics) SetParentID(string) {}
+func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) Close() {}
+func (m *noopMetrics) AddMethodDuration(string, time.Duration, bool) {}
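
Any collector satisfying this interface can replace the built-in no-op. A hypothetical stderr-logging implementation, purely illustrative (the node's real Prometheus wiring is outside this patch; assumes the standard log and time packages plus the mode import above):

// logMetrics prints every recorded call; it exists only to show the contract.
type logMetrics struct{ parentID string }

func (m *logMetrics) SetParentID(parentID string) { m.parentID = parentID }

func (m *logMetrics) SetMode(md mode.ComponentMode) {
	log.Printf("metabase %s: mode=%v", m.parentID, md)
}

func (m *logMetrics) Close() { log.Printf("metabase %s: closed", m.parentID) }

func (m *logMetrics) AddMethodDuration(method string, d time.Duration, success bool) {
	log.Printf("metabase %s: %s took %v (success=%t)", m.parentID, method, d, success)
}
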
diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go
index dd1cdc900f..7edb963841 100644
--- a/pkg/local_object_storage/metabase/mode.go
+++ b/pkg/local_object_storage/metabase/mode.go
@@ -1,6 +1,7 @@
package meta
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -8,7 +9,7 @@ import (
// SetMode sets the metabase mode of operation.
// If the mode assumes no operation metabase, the database is closed.
-func (db *DB) SetMode(m mode.Mode) error {
+func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
@@ -17,28 +18,24 @@ func (db *DB) SetMode(m mode.Mode) error {
}
if !db.mode.NoMetabase() {
- if err := db.Close(); err != nil {
- return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ if err := db.Close(ctx); err != nil {
+ return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
- var err error
- switch {
- case m.NoMetabase():
+ if m.NoMetabase() {
db.boltDB = nil
- case m.ReadOnly():
- err = db.Open(true)
- default:
- err = db.Open(false)
- }
- if err == nil && !m.NoMetabase() && !m.ReadOnly() {
- err = db.Init()
- }
-
- if err != nil {
- return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ } else {
+ err := db.openDB(ctx, m)
+ if err == nil && !m.ReadOnly() {
+ err = db.Init(ctx)
+ }
+ if err != nil {
+ return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ }
}
db.mode = m
+ db.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
return nil
}
diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go
new file mode 100644
index 0000000000..28b42283f9
--- /dev/null
+++ b/pkg/local_object_storage/metabase/mode_test.go
@@ -0,0 +1,37 @@
+package meta
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "github.com/stretchr/testify/require"
+)
+
+type epochStateTest struct{}
+
+func (s epochStateTest) CurrentEpoch() uint64 {
+ return 0
+}
+
+func Test_Mode(t *testing.T) {
+ t.Parallel()
+ bdb := New([]Option{
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochStateTest{}),
+ }...)
+
+ require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly))
+ require.Nil(t, bdb.boltDB)
+ require.NoError(t, bdb.Init(context.Background()))
+ require.Nil(t, bdb.boltDB)
+ require.NoError(t, bdb.Close(context.Background()))
+
+ require.NoError(t, bdb.Open(context.Background(), mode.Degraded))
+ require.Nil(t, bdb.boltDB)
+ require.NoError(t, bdb.Init(context.Background()))
+ require.Nil(t, bdb.boltDB)
+ require.NoError(t, bdb.Close(context.Background()))
+}
diff --git a/pkg/local_object_storage/metabase/movable.go b/pkg/local_object_storage/metabase/movable.go
deleted file mode 100644
index e6990dc547..0000000000
--- a/pkg/local_object_storage/metabase/movable.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package meta
-
-import (
- "fmt"
-
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
-)
-
-// ToMoveItPrm groups the parameters of ToMoveIt operation.
-type ToMoveItPrm struct {
- addr oid.Address
-}
-
-// ToMoveItRes groups the resulting values of ToMoveIt operation.
-type ToMoveItRes struct{}
-
-// SetAddress sets address of the object to move into another shard.
-func (p *ToMoveItPrm) SetAddress(addr oid.Address) {
- p.addr = addr
-}
-
-// DoNotMovePrm groups the parameters of DoNotMove operation.
-type DoNotMovePrm struct {
- addr oid.Address
-}
-
-// DoNotMoveRes groups the resulting values of DoNotMove operation.
-type DoNotMoveRes struct{}
-
-// SetAddress sets address of the object to prevent moving into another shard.
-func (p *DoNotMovePrm) SetAddress(addr oid.Address) {
- p.addr = addr
-}
-
-// MovablePrm groups the parameters of Movable operation.
-type MovablePrm struct{}
-
-// MovableRes groups the resulting values of Movable operation.
-type MovableRes struct {
- addrList []oid.Address
-}
-
-// AddressList returns resulting addresses of Movable operation.
-func (p MovableRes) AddressList() []oid.Address {
- return p.addrList
-}
-
-// ToMoveIt marks objects to move it into another shard. This useful for
-// faster HRW fetching.
-func (db *DB) ToMoveIt(prm ToMoveItPrm) (res ToMoveItRes, err error) {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return res, ErrDegradedMode
- } else if db.mode.ReadOnly() {
- return res, ErrReadOnlyMode
- }
-
- key := make([]byte, addressKeySize)
- key = addressKey(prm.addr, key)
-
- err = db.boltDB.Update(func(tx *bbolt.Tx) error {
- toMoveIt := tx.Bucket(toMoveItBucketName)
- return toMoveIt.Put(key, zeroValue)
- })
-
- return
-}
-
-// DoNotMove removes `MoveIt` mark from the object.
-func (db *DB) DoNotMove(prm DoNotMovePrm) (res DoNotMoveRes, err error) {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return res, ErrDegradedMode
- } else if db.mode.ReadOnly() {
- return res, ErrReadOnlyMode
- }
-
- key := make([]byte, addressKeySize)
- key = addressKey(prm.addr, key)
-
- err = db.boltDB.Update(func(tx *bbolt.Tx) error {
- toMoveIt := tx.Bucket(toMoveItBucketName)
- return toMoveIt.Delete(key)
- })
-
- return
-}
-
-// Movable returns list of marked objects to move into other shard.
-func (db *DB) Movable(_ MovablePrm) (MovableRes, error) {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return MovableRes{}, ErrDegradedMode
- }
-
- var strAddrs []string
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- toMoveIt := tx.Bucket(toMoveItBucketName)
- return toMoveIt.ForEach(func(k, v []byte) error {
- strAddrs = append(strAddrs, string(k))
-
- return nil
- })
- })
- if err != nil {
- return MovableRes{}, err
- }
-
- // we can parse strings to structures in-place, but probably it seems
- // more efficient to keep bolt db TX code smaller because it might be
- // bottleneck.
- addrs := make([]oid.Address, len(strAddrs))
-
- for i := range strAddrs {
- err = decodeAddressFromKey(&addrs[i], []byte(strAddrs[i]))
- if err != nil {
- return MovableRes{}, fmt.Errorf("can't parse object address %v: %w",
- strAddrs[i], err)
- }
- }
-
- return MovableRes{
- addrList: addrs,
- }, nil
-}
diff --git a/pkg/local_object_storage/metabase/movable_test.go b/pkg/local_object_storage/metabase/movable_test.go
deleted file mode 100644
index 7564f81bd1..0000000000
--- a/pkg/local_object_storage/metabase/movable_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package meta_test
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/stretchr/testify/require"
-)
-
-func TestDB_Movable(t *testing.T) {
- db := newDB(t)
-
- raw1 := generateObject(t)
- raw2 := generateObject(t)
-
- // put two objects in metabase
- err := putBig(db, raw1)
- require.NoError(t, err)
-
- err = putBig(db, raw2)
- require.NoError(t, err)
-
- // check if toMoveIt index empty
- toMoveList, err := metaMovable(db)
- require.NoError(t, err)
- require.Len(t, toMoveList, 0)
-
- // mark to move object2
- err = metaToMoveIt(db, object.AddressOf(raw2))
- require.NoError(t, err)
-
- // check if toMoveIt index contains address of object 2
- toMoveList, err = metaMovable(db)
- require.NoError(t, err)
- require.Len(t, toMoveList, 1)
- require.Contains(t, toMoveList, object.AddressOf(raw2))
-
- // remove from toMoveIt index non existing address
- err = metaDoNotMove(db, object.AddressOf(raw1))
- require.NoError(t, err)
-
- // check if toMoveIt index hasn't changed
- toMoveList, err = metaMovable(db)
- require.NoError(t, err)
- require.Len(t, toMoveList, 1)
-
- // remove from toMoveIt index existing address
- err = metaDoNotMove(db, object.AddressOf(raw2))
- require.NoError(t, err)
-
- // check if toMoveIt index is empty now
- toMoveList, err = metaMovable(db)
- require.NoError(t, err)
- require.Len(t, toMoveList, 0)
-}
-
-func metaToMoveIt(db *meta.DB, addr oid.Address) error {
- var toMovePrm meta.ToMoveItPrm
- toMovePrm.SetAddress(addr)
-
- _, err := db.ToMoveIt(toMovePrm)
- return err
-}
-
-func metaMovable(db *meta.DB) ([]oid.Address, error) {
- r, err := db.Movable(meta.MovablePrm{})
- if err != nil {
- return nil, err
- }
-
- return r.AddressList(), nil
-}
-
-func metaDoNotMove(db *meta.DB, addr oid.Address) error {
- var doNotMovePrm meta.DoNotMovePrm
- doNotMovePrm.SetAddress(addr)
-
- _, err := db.DoNotMove(doNotMovePrm)
- return err
-}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 06b1d3ac8f..5e1bbfe9e9 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -1,19 +1,27 @@
package meta
import (
+ "context"
"encoding/binary"
"errors"
"fmt"
gio "io"
+ "strconv"
+ "time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type (
@@ -27,10 +35,14 @@ type PutPrm struct {
obj *objectSDK.Object
id []byte
+
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
-type PutRes struct{}
+type PutRes struct {
+ Inserted bool
+}
// SetObject is a Put option to set object to save.
func (p *PutPrm) SetObject(obj *objectSDK.Object) {
@@ -42,17 +54,34 @@ func (p *PutPrm) SetStorageID(id []byte) {
p.id = id
}
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
var (
- ErrUnknownObjectType = errors.New("unknown object type")
- ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
- ErrIncorrectRootObject = errors.New("invalid root object")
+ ErrUnknownObjectType = errors.New("unknown object type")
+ ErrIncorrectRootObject = errors.New("invalid root object")
)
// Put saves object header in metabase. Object payload expected to be cut.
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (db *DB) Put(prm PutPrm) (res PutRes, err error) {
+func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Put", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Put",
+ trace.WithAttributes(
+ attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -65,79 +94,104 @@ func (db *DB) Put(prm PutPrm) (res PutRes, err error) {
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
- return db.put(tx, prm.obj, prm.id, nil, currEpoch)
+ var e error
+ res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes)
+ return e
})
if err == nil {
- storagelog.Write(db.log,
+ success = true
+ storagelog.Write(ctx, db.log,
storagelog.AddressField(objectCore.AddressOf(prm.obj)),
storagelog.OpField("metabase PUT"))
}
- return
+ return res, metaerr.Wrap(err)
}
-func (db *DB) put(
- tx *bbolt.Tx, obj *objectSDK.Object, id []byte,
- si *objectSDK.SplitInfo, currEpoch uint64) error {
+func (db *DB) put(tx *bbolt.Tx,
+ obj *objectSDK.Object,
+ id []byte,
+ si *objectSDK.SplitInfo,
+ currEpoch uint64,
+ indexAttributes bool,
+) (PutRes, error) {
cnr, ok := obj.ContainerID()
if !ok {
- return errors.New("missing container in object")
+ return PutRes{}, errors.New("missing container in object")
+ }
+
+ var ecParentAddress oid.Address
+ if ecHeader := obj.ECHeader(); ecHeader != nil {
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ecHeader.Parent())
}
isParent := si != nil
- exists, err := db.exists(tx, object.AddressOf(obj), currEpoch)
+ exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch)
+ var splitInfoError *objectSDK.SplitInfoError
if errors.As(err, &splitInfoError) {
exists = true // object exists, however it is virtual
} else if err != nil {
- return err // return any error besides SplitInfoError
+ return PutRes{}, err // return any error besides SplitInfoError
}
+ if exists {
+ return PutRes{}, db.updateObj(tx, obj, id, si, isParent)
+ }
+
+ return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes)
+}
+
+func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error {
// most right child and split header overlap parent so we have to
// check if object exists to not overwrite it twice
- if exists {
- // When storage engine moves objects between different sub-storages,
- // it calls metabase.Put method with new storage ID, thus triggering this code.
- if !isParent && id != nil {
- return updateStorageID(tx, object.AddressOf(obj), id)
- }
- // when storage already has last object in split hierarchy and there is
- // a linking object to put (or vice versa), we should update split info
- // with object ids of these objects
- if isParent {
- return updateSplitInfo(tx, object.AddressOf(obj), si)
- }
-
- return nil
+ // When storage engine moves objects between different sub-storages,
+ // it calls metabase.Put method with new storage ID, thus triggering this code.
+ if !isParent && id != nil {
+ return setStorageID(tx, objectCore.AddressOf(obj), id, true)
}
+ // when storage already has last object in split hierarchy and there is
+ // a linking object to put (or vice versa), we should update split info
+ // with object ids of these objects
+ if isParent {
+ return updateSplitInfo(tx, objectCore.AddressOf(obj), si)
+ }
+
+ return nil
+}
+
+func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error {
if par := obj.Parent(); par != nil && !isParent { // limit depth by two
parentSI, err := splitInfoFromObject(obj)
if err != nil {
return err
}
- err = db.put(tx, par, id, parentSI, currEpoch)
+ _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes)
if err != nil {
return err
}
}
- err = putUniqueIndexes(tx, obj, si, id)
+ err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
- return fmt.Errorf("can't put unique indexes: %w", err)
+ return fmt.Errorf("put unique indexes: %w", err)
}
err = updateListIndexes(tx, obj, putListIndexItem)
if err != nil {
- return fmt.Errorf("can't put list indexes: %w", err)
+ return fmt.Errorf("put list indexes: %w", err)
}
- err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
- if err != nil {
- return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
+ if indexAttributes {
+ err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("put fake bucket tree indexes: %w", err)
+ }
}
// update container volume size estimation
@@ -149,103 +203,131 @@ func (db *DB) put(
}
if !isParent {
- err = db.updateCounter(tx, phy, 1, true)
- if err != nil {
- return fmt.Errorf("could not increase phy object counter: %w", err)
- }
-
- // it is expected that putting an unavailable object is
- // impossible and should be handled on the higher levels
- err = db.updateCounter(tx, logical, 1, true)
- if err != nil {
- return fmt.Errorf("could not increase logical object counter: %w", err)
+ if err = db.incCounters(tx, cnr, IsUserObject(obj)); err != nil {
+ return err
}
}
return nil
}
-func putUniqueIndexes(
- tx *bbolt.Tx,
- obj *objectSDK.Object,
- si *objectSDK.SplitInfo,
- id []byte,
-) error {
+func putUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, si *objectSDK.SplitInfo, id []byte) error {
isParent := si != nil
- addr := object.AddressOf(obj)
- cnr := addr.Container()
+ addr := objectCore.AddressOf(obj)
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
bucketName := make([]byte, bucketKeySize)
- // add value to primary unique bucket
if !isParent {
- switch obj.Type() {
- case objectSDK.TypeRegular:
- bucketName = primaryBucketName(cnr, bucketName)
- case objectSDK.TypeTombstone:
- bucketName = tombstoneBucketName(cnr, bucketName)
- case objectSDK.TypeStorageGroup:
- bucketName = storageGroupBucketName(cnr, bucketName)
- case objectSDK.TypeLock:
- bucketName = bucketNameLockers(cnr, bucketName)
- default:
- return ErrUnknownObjectType
- }
-
- rawObject, err := obj.CutPayload().Marshal()
- if err != nil {
- return fmt.Errorf("can't marshal object header: %w", err)
- }
-
- err = putUniqueIndexItem(tx, namedBucketItem{
- name: bucketName,
- key: objKey,
- val: rawObject,
- })
+ err := putRawObjectData(tx, obj, bucketName, addr, objKey)
if err != nil {
return err
}
-
- // index storageID if it is present
if id != nil {
- err = putUniqueIndexItem(tx, namedBucketItem{
- name: smallBucketName(cnr, bucketName),
- key: objKey,
- val: id,
- })
- if err != nil {
+ if err = setStorageID(tx, objectCore.AddressOf(obj), id, false); err != nil {
return err
}
}
}
- // index root object
- if obj.Type() == objectSDK.TypeRegular && !obj.HasParent() {
- var (
- err error
- splitInfo []byte
- )
+ if err := putExpirationEpoch(tx, obj, addr, objKey); err != nil {
+ return err
+ }
- if isParent {
- splitInfo, err = si.Marshal()
- if err != nil {
- return fmt.Errorf("can't marshal split info: %w", err)
- }
+ return putSplitInfo(tx, obj, bucketName, addr, si, objKey)
+}
+
+func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, objKey []byte) error {
+ switch obj.Type() {
+ case objectSDK.TypeRegular:
+ bucketName = primaryBucketName(addr.Container(), bucketName)
+ case objectSDK.TypeTombstone:
+ bucketName = tombstoneBucketName(addr.Container(), bucketName)
+ case objectSDK.TypeLock:
+ bucketName = bucketNameLockers(addr.Container(), bucketName)
+ default:
+ return ErrUnknownObjectType
+ }
+ rawObject, err := obj.CutPayload().Marshal()
+ if err != nil {
+ return fmt.Errorf("marshal object header: %w", err)
+ }
+ return putUniqueIndexItem(tx, namedBucketItem{
+ name: bucketName,
+ key: objKey,
+ val: rawObject,
+ })
+}
+
+func putExpirationEpoch(tx *bbolt.Tx, obj *objectSDK.Object, addr oid.Address, objKey []byte) error {
+ if expEpoch, ok := hasExpirationEpoch(obj); ok {
+ err := putUniqueIndexItem(tx, namedBucketItem{
+ name: expEpochToObjectBucketName,
+ key: expirationEpochKey(expEpoch, addr.Container(), addr.Object()),
+ val: zeroValue,
+ })
+ if err != nil {
+ return err
}
-
+ val := make([]byte, epochSize)
+ binary.LittleEndian.PutUint64(val, expEpoch)
err = putUniqueIndexItem(tx, namedBucketItem{
- name: rootBucketName(cnr, bucketName),
+ name: objectToExpirationEpochBucketName(addr.Container(), make([]byte, bucketKeySize)),
key: objKey,
- val: splitInfo,
+ val: val,
})
if err != nil {
return err
}
}
-
return nil
}
+func putSplitInfo(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, si *objectSDK.SplitInfo, objKey []byte) error {
+ if obj.Type() == objectSDK.TypeRegular && !obj.HasParent() {
+ if ecHead := obj.ECHeader(); ecHead != nil {
+ parentID := ecHead.Parent()
+ if ecHead.ParentSplitID() != nil {
+ parentSplitParentID := ecHead.ParentSplitParentID()
+ if parentSplitParentID == nil {
+ return nil
+ }
+
+ si = objectSDK.NewSplitInfo()
+ si.SetSplitID(ecHead.ParentSplitID())
+ si.SetLastPart(ecHead.Parent())
+
+ parentID = *parentSplitParentID
+ }
+ objKey = objectKey(parentID, objKey)
+ }
+ return updateSplitInfoIndex(tx, objKey, addr.Container(), bucketName, si)
+ }
+ return nil
+}
+
+func updateSplitInfoIndex(tx *bbolt.Tx, objKey []byte, cnr cid.ID, bucketName []byte, si *objectSDK.SplitInfo) error {
+ return updateUniqueIndexItem(tx, namedBucketItem{
+ name: rootBucketName(cnr, bucketName),
+ key: objKey,
+ }, func(old, _ []byte) ([]byte, error) {
+ switch {
+ case si == nil && old == nil:
+ return []byte{}, nil
+ case si == nil:
+ return old, nil
+ case old == nil:
+ return si.Marshal()
+ default:
+ oldSI := objectSDK.NewSplitInfo()
+ if err := oldSI.Unmarshal(old); err != nil {
+ return nil, err
+ }
+ si = util.MergeSplitInfo(si, oldSI)
+ return si.Marshal()
+ }
+ })
+}
+
type updateIndexItemFunc = func(tx *bbolt.Tx, item namedBucketItem) error
func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
@@ -254,18 +336,6 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
objKey := objectKey(idObj, make([]byte, objectKeySize))
bucketName := make([]byte, bucketKeySize)
- cs, _ := obj.PayloadChecksum()
-
- // index payload hashes
- err := f(tx, namedBucketItem{
- name: payloadHashBucketName(cnr, bucketName),
- key: cs.Value(),
- val: objKey,
- })
- if err != nil {
- return err
- }
-
idParent, ok := obj.ParentID()
// index parent ids
@@ -292,28 +362,74 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
}
}
+ if ech := obj.ECHeader(); ech != nil {
+ err := f(tx, namedBucketItem{
+ name: ecInfoBucketName(cnr, bucketName),
+ key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
+ val: objKey,
+ })
+ if err != nil {
+ return err
+ }
+
+ if ech.ParentSplitID() != nil {
+ objKey := objectKey(ech.Parent(), make([]byte, objectKeySize))
+ err := f(tx, namedBucketItem{
+ name: splitBucketName(cnr, bucketName),
+ key: ech.ParentSplitID().ToV2(),
+ val: objKey,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ if parentSplitParentID := ech.ParentSplitParentID(); parentSplitParentID != nil {
+ objKey := objectKey(ech.Parent(), make([]byte, objectKeySize))
+ err := f(tx, namedBucketItem{
+ name: parentBucketName(cnr, bucketName),
+ key: objectKey(*parentSplitParentID, make([]byte, objectKeySize)),
+ val: objKey,
+ })
+ if err != nil {
+ return err
+ }
+ }
+ }
+
return nil
}
+var indexedAttributes = map[string]struct{}{
+ "S3-Access-Box-CRDT-Name": {},
+ objectSDK.AttributeFilePath: {},
+}
+
+// IsAtrributeIndexed returns true if the attribute is indexed by the metabase.
+func IsAtrributeIndexed(attr string) bool {
+ _, found := indexedAttributes[attr]
+ return found
+}
+
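
With this allowlist only the two attributes above get FKBT index buckets; filters on any other attribute fall back to the slow path (see the useAttributeIndex flag threaded through Select further down). A quick illustration, sketch only ("Content-Type" is just an arbitrary non-indexed key):

meta.IsAtrributeIndexed(objectSDK.AttributeFilePath) // true: FKBT-indexed
meta.IsAtrributeIndexed("Content-Type")              // false: slow filter path
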
func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
id, _ := obj.ID()
cnr, _ := obj.ContainerID()
objKey := objectKey(id, make([]byte, objectKeySize))
- attrs := obj.Attributes()
-
key := make([]byte, bucketKeySize)
- err := f(tx, namedBucketItem{
- name: ownerBucketName(cnr, key),
- key: []byte(obj.OwnerID().EncodeToString()),
- val: objKey,
- })
- if err != nil {
- return err
+ var attrs []objectSDK.Attribute
+ if obj.ECHeader() != nil {
+ attrs = obj.ECHeader().ParentAttributes()
+ objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
+ } else {
+ attrs = obj.Attributes()
}
// user specified attributes
for i := range attrs {
+ if !IsAtrributeIndexed(attrs[i].Key()) {
+ continue
+ }
key = attributeBucketName(cnr, attrs[i].Key(), key)
err := f(tx, namedBucketItem{
name: key,
@@ -328,45 +444,80 @@ func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
return nil
}
-func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt, err := tx.CreateBucketIfNotExists(item.name)
+func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
+ attributes := obj.Attributes()
+ if ech := obj.ECHeader(); ech != nil {
+ attributes = ech.ParentAttributes()
+ }
+ for _, attr := range attributes {
+ if attr.Key() == objectV2.SysAttributeExpEpoch {
+ expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
+ return expEpoch, err == nil
+ }
+ }
+ return 0, false
+}
+
+type bucketContainer interface {
+ Bucket([]byte) *bbolt.Bucket
+ CreateBucket([]byte) (*bbolt.Bucket, error)
+ CreateBucketIfNotExists([]byte) (*bbolt.Bucket, error)
+}
+
+func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Bucket, error) {
+ if bkt := tx.Bucket(name); bkt != nil {
+ return bkt, nil
+ }
+ return tx.CreateBucket(name)
+}
+
+func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
+ bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
- return bkt.Put(item.key, item.val)
+ data, err := update(bkt.Get(item.key), item.val)
+ if err != nil {
+ return err
+ }
+ return bkt.Put(item.key, data)
+}
+
+func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil })
}
func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt, err := tx.CreateBucketIfNotExists(item.name)
+ bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
- fkbtRoot, err := bkt.CreateBucketIfNotExists(item.key)
+ fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
if err != nil {
- return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
+ return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
}
return fkbtRoot.Put(item.val, zeroValue)
}
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt, err := tx.CreateBucketIfNotExists(item.name)
+ bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
+ return fmt.Errorf("create index %v: %w", item.name, err)
}
lst, err := decodeList(bkt.Get(item.key))
if err != nil {
- return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("decode leaf list %v: %w", item.key, err)
}
lst = append(lst, item.val)
encodedLst, err := encodeList(lst)
if err != nil {
- return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("encode leaf list %v: %w", item.key, err)
}
return bkt.Put(item.key, encodedLst)
@@ -442,51 +593,26 @@ func getVarUint(data []byte) (uint64, int, error) {
}
}
-// updateStorageID for existing objects if they were moved from one
+// setStorageID for existing objects if they were moved from one
// storage location to another.
-func updateStorageID(tx *bbolt.Tx, addr oid.Address, id []byte) error {
+func setStorageID(tx *bbolt.Tx, addr oid.Address, id []byte, override bool) error {
key := make([]byte, bucketKeySize)
- bkt, err := tx.CreateBucketIfNotExists(smallBucketName(addr.Container(), key))
+ bkt, err := createBucketLikelyExists(tx, smallBucketName(addr.Container(), key))
if err != nil {
return err
}
-
- return bkt.Put(objectKey(addr.Object(), key), id)
+ key = objectKey(addr.Object(), key)
+ if override || bkt.Get(key) == nil {
+ return bkt.Put(key, id)
+ }
+ return nil
}
// updateSplitInfo for existing objects if the storage is filled with extra information
// about last object in split hierarchy or linking object.
func updateSplitInfo(tx *bbolt.Tx, addr oid.Address, from *objectSDK.SplitInfo) error {
- key := make([]byte, bucketKeySize)
- bkt := tx.Bucket(rootBucketName(addr.Container(), key))
- if bkt == nil {
- // if object doesn't exists and we want to update split info on it
- // then ignore, this should never happen
- return ErrIncorrectSplitInfoUpdate
- }
-
- objectKey := objectKey(addr.Object(), key)
-
- rawSplitInfo := bkt.Get(objectKey)
- if len(rawSplitInfo) == 0 {
- return ErrIncorrectSplitInfoUpdate
- }
-
- to := objectSDK.NewSplitInfo()
-
- err := to.Unmarshal(rawSplitInfo)
- if err != nil {
- return fmt.Errorf("can't unmarshal split info from root index: %w", err)
- }
-
- result := util.MergeSplitInfo(from, to)
-
- rawSplitInfo, err = result.Marshal()
- if err != nil {
- return fmt.Errorf("can't marhsal merged split info: %w", err)
- }
-
- return bkt.Put(objectKey, rawSplitInfo)
+ objKey := objectKey(addr.Object(), make([]byte, bucketKeySize))
+ return updateSplitInfoIndex(tx, objKey, addr.Container(), make([]byte, bucketKeySize), from)
}
// splitInfoFromObject returns split info based on the last or linking object.
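
Both Put paths now converge on updateSplitInfoIndex, so the merge rule lives in exactly one place. Restated as a standalone helper for readability, a sketch mirroring the callback above (mergeRootValue is a hypothetical name; MergeSplitInfo is the util helper this file already imports):

// mergeRootValue decides what the root-index slot holds after an update.
func mergeRootValue(old []byte, si *objectSDK.SplitInfo) ([]byte, error) {
	switch {
	case si == nil && old == nil:
		return []byte{}, nil // first sight of the root: store an empty marker
	case si == nil:
		return old, nil // nothing new: keep the stored value
	case old == nil:
		return si.Marshal() // first split info recorded for this root
	default:
		oldSI := objectSDK.NewSplitInfo()
		if err := oldSI.Unmarshal(old); err != nil {
			return nil, err
		}
		return util.MergeSplitInfo(si, oldSI).Marshal()
	}
}
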
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index 89b0a53b84..f37ed4cf2f 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -1,27 +1,29 @@
package meta_test
import (
+ "context"
"runtime"
"strconv"
+ "sync/atomic"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
- "go.uber.org/atomic"
)
-func prepareObjects(t testing.TB, n int) []*objectSDK.Object {
+func prepareObjects(n int) []*objectSDK.Object {
cnr := cidtest.ID()
parentID := objecttest.ID()
objs := make([]*objectSDK.Object, n)
for i := range objs {
- objs[i] = generateObjectWithCID(t, cnr)
+ objs[i] = testutil.GenerateObjectWithCID(cnr)
// FKBT indices.
attrs := make([]objectSDK.Attribute, 20)
@@ -44,16 +46,19 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(runtime.NumCPU()))
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
b.SetParallelism(1)
- index := atomic.NewInt64(-1)
- objs := prepareObjects(b, b.N)
+ var index atomic.Int64
+ index.Store(-1)
+
+ objs := prepareObjects(b.N)
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := metaPut(db, objs[index.Inc()], nil); err != nil {
+ if err := metaPut(db, objs[index.Add(1)], nil); err != nil {
b.Fatal(err)
}
}
@@ -63,22 +68,25 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(1))
- index := atomic.NewInt64(-1)
- objs := prepareObjects(b, b.N)
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
+ var index atomic.Int64
+ index.Store(-1)
+ objs := prepareObjects(b.N)
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- if err := metaPut(db, objs[index.Inc()], nil); err != nil {
+ for range b.N {
+ if err := metaPut(db, objs[index.Add(1)], nil); err != nil {
b.Fatal(err)
}
}
})
}
-func TestDB_PutBlobovnicaUpdate(t *testing.T) {
+func TestDB_PutBlobovniczaUpdate(t *testing.T) {
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
- raw1 := generateObject(t)
+ raw1 := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
// put one object with storageID
@@ -101,7 +109,7 @@ func TestDB_PutBlobovnicaUpdate(t *testing.T) {
})
t.Run("update storageID on bad object", func(t *testing.T) {
- raw2 := generateObject(t)
+ raw2 := testutil.GenerateObject()
err := putBig(db, raw2)
require.NoError(t, err)
@@ -116,7 +124,7 @@ func metaPut(db *meta.DB, obj *objectSDK.Object, id []byte) error {
putPrm.SetObject(obj)
putPrm.SetStorageID(id)
- _, err := db.Put(putPrm)
+ _, err := db.Put(context.Background(), putPrm)
return err
}
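
The benchmarks also migrate from go.uber.org/atomic to the standard library's sync/atomic types. The index-dealing pattern in isolation, as a self-contained sketch:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var index atomic.Int64
	index.Store(-1) // so the first Add(1) yields slot 0

	var wg sync.WaitGroup
	for range 4 { // four workers take three slots each
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range 3 {
				_ = index.Add(1) // every call returns a unique slot
			}
		}()
	}
	wg.Wait()
	fmt.Println(index.Load()) // 11: slots 0..11 were handed out
}
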
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
new file mode 100644
index 0000000000..5f0956f0bd
--- /dev/null
+++ b/pkg/local_object_storage/metabase/reset_test.go
@@ -0,0 +1,57 @@
+package meta
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "github.com/stretchr/testify/require"
+ "go.etcd.io/bbolt"
+)
+
+type epochState struct{ e uint64 }
+
+func (s epochState) CurrentEpoch() uint64 {
+ return s.e
+}
+
+func TestResetDropsContainerBuckets(t *testing.T) {
+ t.Parallel()
+
+ db := New(
+ []Option{
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{}),
+ }...,
+ )
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ for idx := range 100 {
+ var putPrm PutPrm
+ putPrm.SetObject(testutil.GenerateObject())
+ putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
+ _, err := db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, db.Reset())
+
+ var bucketCount int
+ require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
+ _, exists := mStaticBuckets[string(name)]
+ require.True(t, exists, "unexpected bucket:"+string(name))
+ bucketCount++
+ return nil
+ })
+ }))
+ require.Equal(t, len(mStaticBuckets), bucketCount)
+}
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 20985f47a0..60da506717 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -1,17 +1,22 @@
package meta
import (
+ "context"
"encoding/binary"
"errors"
"fmt"
"strings"
+ "time"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
- "go.uber.org/zap"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type (
@@ -24,14 +29,15 @@ type (
cnr cid.ID
- fastFilters, slowFilters object.SearchFilters
+ fastFilters, slowFilters objectSDK.SearchFilters
}
)
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters object.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ useAttributeIndex bool
}
// SelectRes groups the resulting values of Select operation.
@@ -45,17 +51,35 @@ func (p *SelectPrm) SetContainerID(cnr cid.ID) {
}
// SetFilters is a Select option to set the object filters.
-func (p *SelectPrm) SetFilters(fs object.SearchFilters) {
+func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
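+// SetUseAttributeIndex is a Select option to resolve indexed user attribute
+// filters via the attribute index instead of by unmarshaling object headers.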
+func (p *SelectPrm) SetUseAttributeIndex(v bool) {
+ p.useAttributeIndex = v
+}
+
// AddressList returns list of addresses of the selected objects.
func (r SelectRes) AddressList() []oid.Address {
return r.addrList
}
// Select returns list of addresses of objects that match search filters.
-func (db *DB) Select(prm SelectPrm) (res SelectRes, err error) {
+func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("Select", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Select",
+ trace.WithAttributes(
+ attribute.String("container_id", prm.cnr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -63,21 +87,22 @@ func (db *DB) Select(prm SelectPrm) (res SelectRes, err error) {
return res, ErrDegradedMode
}
- if blindlyProcess(prm.filters) {
+ if checkNonEmpty(prm.filters) {
+ success = true
return res, nil
}
currEpoch := db.epochState.CurrentEpoch()
- return res, db.boltDB.View(func(tx *bbolt.Tx) error {
- res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch)
-
+ return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
+ res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex)
+ success = err == nil
return err
- })
+ }))
}
-func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs object.SearchFilters, currEpoch uint64) ([]oid.Address, error) {
- group, err := groupFilters(fs)
+func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) {
+ group, err := groupFilters(fs, useAttributeIndex)
if err != nil {
return nil, err
}
@@ -106,6 +131,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs object.SearchFilters, c
res := make([]oid.Address, 0, len(mAddr))
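+ // bucket cache lets status checks and slow filters reuse bucket lookups within this transaction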
+ bc := newBucketCache()
for a, ind := range mAddr {
if ind != expLen {
continue // ignore objects with unmatched fast filters
@@ -120,12 +146,16 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs object.SearchFilters, c
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(id)
-
- if objectStatus(tx, addr, currEpoch) > 0 {
+ st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
+ if err != nil {
+ return nil, err
+ }
+ if st > 0 {
continue // ignore removed objects
}
- if !db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) {
+ addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
+ if !match {
continue // ignore objects with unmatched slow filters
}
@@ -140,7 +170,6 @@ func (db *DB) selectAll(tx *bbolt.Tx, cnr cid.ID, to map[string]int) {
bucketName := make([]byte, bucketKeySize)
selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, 0)
selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, 0)
- selectAllFromBucket(tx, storageGroupBucketName(cnr, bucketName), to, 0)
selectAllFromBucket(tx, parentBucketName(cnr, bucketName), to, 0)
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, 0)
}
@@ -153,7 +182,7 @@ func selectAllFromBucket(tx *bbolt.Tx, name []byte, to map[string]int, fNum int)
return
}
- _ = bkt.ForEach(func(k, v []byte) error {
+ _ = bkt.ForEach(func(k, _ []byte) error {
markAddressInCache(to, fNum, string(k))
return nil
@@ -165,7 +194,7 @@ func selectAllFromBucket(tx *bbolt.Tx, name []byte, to map[string]int, fNum int)
func (db *DB) selectFastFilter(
tx *bbolt.Tx,
cnr cid.ID, // container we search on
- f object.SearchFilter, // fast filter
+ f objectSDK.SearchFilter, // fast filter
to map[string]int, // resulting cache
fNum int, // index of filter
) {
@@ -174,12 +203,6 @@ func (db *DB) selectFastFilter(
switch f.Header() {
case v2object.FilterHeaderObjectID:
db.selectObjectID(tx, f, cnr, to, fNum, currEpoch)
- case v2object.FilterHeaderOwnerID:
- bucketName := ownerBucketName(cnr, bucketName)
- db.selectFromFKBT(tx, bucketName, f, to, fNum)
- case v2object.FilterHeaderPayloadHash:
- bucketName := payloadHashBucketName(cnr, bucketName)
- db.selectFromList(tx, bucketName, f, to, fNum)
case v2object.FilterHeaderObjectType:
for _, bucketName := range bucketNamesForType(cnr, f.Operation(), f.Value()) {
selectAllFromBucket(tx, bucketName, to, fNum)
@@ -190,17 +213,18 @@ func (db *DB) selectFastFilter(
case v2object.FilterHeaderSplitID:
bucketName := splitBucketName(cnr, bucketName)
db.selectFromList(tx, bucketName, f, to, fNum)
+ case v2object.FilterHeaderECParent:
+ bucketName := ecInfoBucketName(cnr, bucketName)
+ db.selectFromList(tx, bucketName, f, to, fNum)
case v2object.FilterPropertyRoot:
selectAllFromBucket(tx, rootBucketName(cnr, bucketName), to, fNum)
case v2object.FilterPropertyPhy:
selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum)
- selectAllFromBucket(tx, storageGroupBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum)
default: // user attribute
bucketName := attributeBucketName(cnr, f.Header(), bucketName)
-
- if f.Operation() == object.MatchNotPresent {
+ if f.Operation() == objectSDK.MatchNotPresent {
selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum)
} else {
db.selectFromFKBT(tx, bucketName, f, to, fNum)
@@ -209,10 +233,9 @@ func (db *DB) selectFastFilter(
}
var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{
- v2object.TypeRegular.String(): {primaryBucketName, parentBucketName},
- v2object.TypeTombstone.String(): {tombstoneBucketName},
- v2object.TypeStorageGroup.String(): {storageGroupBucketName},
- v2object.TypeLock.String(): {bucketNameLockers},
+ v2object.TypeRegular.String(): {primaryBucketName, parentBucketName},
+ v2object.TypeTombstone.String(): {tombstoneBucketName},
+ v2object.TypeLock.String(): {bucketNameLockers},
}
func allBucketNames(cnr cid.ID) (names [][]byte) {
@@ -221,11 +244,10 @@ func allBucketNames(cnr cid.ID) (names [][]byte) {
names = append(names, fn(cnr, make([]byte, bucketKeySize)))
}
}
-
return
}
-func bucketNamesForType(cnr cid.ID, mType object.SearchMatchType, typeVal string) (names [][]byte) {
+func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) {
appendNames := func(key string) {
fns, ok := mBucketNaming[key]
if ok {
@@ -237,15 +259,15 @@ func bucketNamesForType(cnr cid.ID, mType object.SearchMatchType, typeVal string
switch mType {
default:
- case object.MatchStringNotEqual:
+ case objectSDK.MatchStringNotEqual:
for key := range mBucketNaming {
if key != typeVal {
appendNames(key)
}
}
- case object.MatchStringEqual:
+ case objectSDK.MatchStringEqual:
appendNames(typeVal)
- case object.MatchCommonPrefix:
+ case objectSDK.MatchCommonPrefix:
for key := range mBucketNaming {
if strings.HasPrefix(key, typeVal) {
appendNames(key)
@@ -256,19 +278,15 @@ func bucketNamesForType(cnr cid.ID, mType object.SearchMatchType, typeVal string
return
}
-// selectFromList looks into index to find list of addresses to add in
-// resulting cache.
func (db *DB) selectFromFKBT(
tx *bbolt.Tx,
name []byte, // fkbt root bucket name
- f object.SearchFilter, // filter for operation and value
+ f objectSDK.SearchFilter, // filter for operation and value
to map[string]int, // resulting cache
fNum int, // index of filter
) { //
matchFunc, ok := db.matchers[f.Operation()]
if !ok {
- db.log.Debug("missing matcher", zap.Uint32("operation", uint32(f.Operation())))
-
return
}
@@ -277,7 +295,7 @@ func (db *DB) selectFromFKBT(
return
}
- err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
+ _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
fkbtLeaf := fkbtRoot.Bucket(k)
if fkbtLeaf == nil {
return nil
@@ -289,9 +307,6 @@ func (db *DB) selectFromFKBT(
return nil
})
})
- if err != nil {
- db.log.Debug("error in FKBT selection", zap.String("error", err.Error()))
- }
}
// selectOutsideFKBT looks into all included buckets to find the list of addresses outside them to add in
@@ -307,12 +322,8 @@ func selectOutsideFKBT(
bktExcl := tx.Bucket(name)
if bktExcl != nil {
- _ = bktExcl.ForEach(func(k, _ []byte) error {
+ _ = bktExcl.ForEachBucket(func(k []byte) error {
exclBktLeaf := bktExcl.Bucket(k)
- if exclBktLeaf == nil {
- return nil
- }
-
return exclBktLeaf.ForEach(func(k, _ []byte) error {
mExcl[string(k)] = struct{}{}
@@ -342,7 +353,7 @@ func selectOutsideFKBT(
func (db *DB) selectFromList(
tx *bbolt.Tx,
name []byte, // list root bucket name
- f object.SearchFilter, // filter for operation and value
+ f objectSDK.SearchFilter, // filter for operation and value
to map[string]int, // resulting cache
fNum int, // index of filter
) { //
@@ -357,27 +368,20 @@ func (db *DB) selectFromList(
)
switch op := f.Operation(); op {
- case object.MatchStringEqual:
+ case objectSDK.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
- db.log.Debug("can't decode list bucket leaf", zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug("unknown operation", zap.Uint32("operation", uint32(op)))
-
return
}
- if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(key, val []byte) error {
+ if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error {
l, err := decodeList(val)
if err != nil {
- db.log.Debug("can't decode list bucket leaf",
- zap.String("error", err.Error()),
- )
-
return err
}
@@ -385,10 +389,6 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
- db.log.Debug("can't iterate over the bucket",
- zap.String("error", err.Error()),
- )
-
return
}
}
@@ -401,7 +401,7 @@ func (db *DB) selectFromList(
// selectObjectID processes objectID filter with in-place optimizations.
func (db *DB) selectObjectID(
tx *bbolt.Tx,
- f object.SearchFilter,
+ f objectSDK.SearchFilter,
cnr cid.ID,
to map[string]int, // resulting cache
fNum int, // index of filter
@@ -412,7 +412,8 @@ func (db *DB) selectObjectID(
addr.SetContainer(cnr)
addr.SetObject(id)
- ok, err := db.exists(tx, addr, currEpoch)
+ var splitInfoError *objectSDK.SplitInfoError
+ ok, _, err := db.exists(tx, addr, oid.Address{}, currEpoch)
if (err == nil && ok) || errors.As(err, &splitInfoError) {
raw := make([]byte, objectKeySize)
id.Encode(raw)
@@ -421,7 +422,7 @@ func (db *DB) selectObjectID(
}
switch op := f.Operation(); op {
- case object.MatchStringEqual:
+ case objectSDK.MatchStringEqual:
var id oid.ID
if err := id.DecodeString(f.Value()); err == nil {
appendOID(id)
@@ -429,87 +430,135 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug("unknown operation",
- zap.Uint32("operation", uint32(f.Operation())),
- )
-
return
}
- for _, bucketName := range bucketNamesForType(cnr, object.MatchStringNotEqual, "") {
+ for _, bucketName := range bucketNamesForType(cnr, objectSDK.MatchStringNotEqual, "") {
// copy-paste from DB.selectAllFrom
bkt := tx.Bucket(bucketName)
if bkt == nil {
return
}
- err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, v []byte) error {
+ _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
var id oid.ID
if err := id.Decode(k); err == nil {
appendOID(id)
}
return nil
})
- if err != nil {
- db.log.Debug("could not iterate over the buckets",
- zap.String("error", err.Error()),
- )
- }
}
}
}
// matchSlowFilters returns true if the object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f object.SearchFilters, currEpoch uint64) bool {
+func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+ result := addr
if len(f) == 0 {
- return true
+ return result, true
}
- buf := make([]byte, addressKeySize)
- obj, err := db.get(tx, addr, buf, true, false, currEpoch)
+ obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
if err != nil {
- return false
+ return result, false
}
for i := range f {
- matchFunc, ok := db.matchers[f[i].Operation()]
- if !ok {
- return false
- }
-
var data []byte
-
switch f[i].Header() {
case v2object.FilterHeaderVersion:
data = []byte(obj.Version().String())
case v2object.FilterHeaderHomomorphicHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent hashes are incomparable
+ }
cs, _ := obj.PayloadHomomorphicHash()
data = cs.Value()
case v2object.FilterHeaderCreationEpoch:
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.CreationEpoch())
case v2object.FilterHeaderPayloadLength:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload lengths are incomparable
+ }
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
- default:
- continue // ignore unknown search attributes
+ case v2object.FilterHeaderOwnerID:
+ data = []byte(obj.OwnerID().EncodeToString())
+ case v2object.FilterHeaderPayloadHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload hashes are incomparable
+ }
+ cs, _ := obj.PayloadChecksum()
+ data = cs.Value()
+ default: // user attribute
+ v, ok := attributeValue(obj, f[i].Header())
+ if ok {
+ if ech := obj.ECHeader(); ech != nil {
+ result.SetObject(ech.Parent())
+ }
+ data = []byte(v)
+ } else {
+ return result, f[i].Operation() == objectSDK.MatchNotPresent
+ }
+ }
+
+ matchFunc, ok := db.matchers[f[i].Operation()]
+ if !ok {
+ return result, false
}
if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) {
- return false
+ return result, false
}
}
- return true
+ return result, true
+}
+
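+// getObjectForSlowFilters loads the object header used by slow filters. If the
+// address points to an EC parent, it falls back to the header of any available
+// EC chunk and reports that a chunk was used.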
+func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
+ buf := make([]byte, addressKeySize)
+ obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
+ if err != nil {
+ var ecInfoError *objectSDK.ECInfoError
+ if errors.As(err, &ecInfoError) {
+ for _, chunk := range ecInfoError.ECInfo().Chunks {
+ var objID oid.ID
+ if err = objID.ReadFromV2(chunk.ID); err != nil {
+ continue
+ }
+ addr.SetObject(objID)
+ obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
+ if err == nil {
+ return obj, true, nil
+ }
+ }
+ }
+ return nil, false, err
+ }
+ return obj, false, nil
+}
+
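+// attributeValue returns the value of the named attribute, preferring the EC
+// parent attributes when the object carries an EC header.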
+func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
+ objectAttributes := obj.Attributes()
+ if ech := obj.ECHeader(); ech != nil {
+ objectAttributes = ech.ParentAttributes()
+ }
+ for _, attr := range objectAttributes {
+ if attr.Key() == attribute {
+ return attr.Value(), true
+ }
+ }
+ return "", false
}
// groupFilters divides filters into two groups: fast and slow. Fast filters
// are processed via indexes, while slow filters are processed afterwards by
// unmarshaling object headers.
-func groupFilters(filters object.SearchFilters) (filterGroup, error) {
+func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) {
res := filterGroup{
- fastFilters: make(object.SearchFilters, 0, len(filters)),
- slowFilters: make(object.SearchFilters, 0, len(filters)),
+ fastFilters: make(objectSDK.SearchFilters, 0, len(filters)),
+ slowFilters: make(objectSDK.SearchFilters, 0, len(filters)),
}
for i := range filters {
@@ -517,18 +566,25 @@ func groupFilters(filters object.SearchFilters) (filterGroup, error) {
case v2object.FilterHeaderContainerID: // support deprecated field
err := res.cnr.DecodeString(filters[i].Value())
if err != nil {
- return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
+ return filterGroup{}, fmt.Errorf("parse container id: %w", err)
}
res.withCnrFilter = true
- case // slow filters
- v2object.FilterHeaderVersion,
- v2object.FilterHeaderCreationEpoch,
- v2object.FilterHeaderPayloadLength,
- v2object.FilterHeaderHomomorphicHash:
- res.slowFilters = append(res.slowFilters, filters[i])
- default: // fast filters or user attributes if unknown
+ case // fast filters
+ v2object.FilterHeaderObjectID,
+ v2object.FilterHeaderObjectType,
+ v2object.FilterHeaderParent,
+ v2object.FilterHeaderSplitID,
+ v2object.FilterHeaderECParent,
+ v2object.FilterPropertyRoot,
+ v2object.FilterPropertyPhy:
res.fastFilters = append(res.fastFilters, filters[i])
+ default:
+ if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) {
+ res.fastFilters = append(res.fastFilters, filters[i])
+ } else {
+ res.slowFilters = append(res.slowFilters, filters[i])
+ }
}
}
@@ -541,15 +597,12 @@ func markAddressInCache(cache map[string]int, fNum int, addr string) {
}
}
-// returns true if query leads to a deliberately empty result.
-func blindlyProcess(fs object.SearchFilters) bool {
+// Returns true if no object can satisfy fs, i.e. the query deliberately yields an empty result.
+func checkNonEmpty(fs objectSDK.SearchFilters) bool {
for i := range fs {
- if fs[i].Operation() == object.MatchNotPresent && isSystemKey(fs[i].Header()) {
+ if fs[i].Operation() == objectSDK.MatchNotPresent && isSystemKey(fs[i].Header()) {
return true
}
-
- // TODO: #1148 check other cases
- // e.g. (a == b) && (a != b)
}
return false
@@ -557,6 +610,5 @@ func blindlyProcess(fs object.SearchFilters) bool {
// returns true if string key is a reserved system filter key.
func isSystemKey(key string) bool {
- // FIXME: #1147 version-dependent approach
return strings.HasPrefix(key, v2object.ReservedFilterPrefix)
}
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 685432bb0f..ce2156d2e7 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -1,185 +1,240 @@
package meta_test
import (
+ "context"
"encoding/hex"
+ "math/rand"
"strconv"
"testing"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestDB_SelectUserAttributes(t *testing.T) {
+ t.Parallel()
+ t.Run("with_index", func(t *testing.T) {
+ testSelectUserAttributes(t, true)
+ })
+ t.Run("without_index", func(t *testing.T) {
+ testSelectUserAttributes(t, false)
+ })
+}
+
+func testSelectUserAttributes(t *testing.T, index bool) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
- raw1 := generateObjectWithCID(t, cnr)
- addAttribute(raw1, "foo", "bar")
- addAttribute(raw1, "x", "y")
+ raw1 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(raw1, "foo", "bar")
+ testutil.AddAttribute(raw1, "x", "y")
- err := putBig(db, raw1)
+ var putPrm meta.PutPrm
+ putPrm.SetIndexAttributes(index)
+ putPrm.SetObject(raw1)
+ _, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
- raw2 := generateObjectWithCID(t, cnr)
- addAttribute(raw2, "foo", "bar")
- addAttribute(raw2, "x", "z")
+ raw2 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(raw2, "foo", "bar")
+ testutil.AddAttribute(raw2, "x", "z")
- err = putBig(db, raw2)
+ putPrm.SetObject(raw2)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
- raw3 := generateObjectWithCID(t, cnr)
- addAttribute(raw3, "a", "b")
+ raw3 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(raw3, "a", "b")
- err = putBig(db, raw3)
+ putPrm.SetObject(raw3)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
- raw4 := generateObjectWithCID(t, cnr)
- addAttribute(raw4, "path", "test/1/2")
+ raw4 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2")
- err = putBig(db, raw4)
+ putPrm.SetObject(raw4)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
- raw5 := generateObjectWithCID(t, cnr)
- addAttribute(raw5, "path", "test/1/3")
+ raw5 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3")
- err = putBig(db, raw5)
+ putPrm.SetObject(raw5)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
- raw6 := generateObjectWithCID(t, cnr)
- addAttribute(raw6, "path", "test/2/3")
+ raw6 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3")
- err = putBig(db, raw6)
+ putPrm.SetObject(raw6)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
+ raw7 := testutil.GenerateObjectWithCID(cnr)
+ var attr objectSDK.Attribute
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/test/3/4")
+ attrs := raw7.Attributes()
+ attrs = append(attrs, attr)
+ ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
+ ID: oidtest.ID(),
+ Attributes: attrs,
+ }, 0, 3, []byte{}, 0)
+ raw7.SetECHeader(ech)
+ putPrm.SetObject(raw7)
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ var raw7Parent oid.Address
+ raw7Parent.SetContainer(cnr)
+ raw7Parent.SetObject(ech.Parent())
+
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw1))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1))
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw2))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2))
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "b", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw3))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3))
fs = objectSDK.SearchFilters{}
fs.AddFilter("c", "d", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs)
+ testSelect2(t, db, cnr, fs, index)
fs = objectSDK.SearchFilters{}
fs.AddFilter("foo", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("key", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ raw7Parent,
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
)
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual)
+ testSelect2(t, db, cnr, fs, index,
+ raw7Parent,
+ )
}
func TestDB_SelectRootPhyParent(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
// prepare
- small := generateObjectWithCID(t, cnr)
+ small := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, small)
require.NoError(t, err)
- ts := generateObjectWithCID(t, cnr)
+ ts := testutil.GenerateObjectWithCID(cnr)
ts.SetType(objectSDK.TypeTombstone)
err = putBig(db, ts)
require.NoError(t, err)
- sg := generateObjectWithCID(t, cnr)
- sg.SetType(objectSDK.TypeStorageGroup)
- err = putBig(db, sg)
- require.NoError(t, err)
-
- leftChild := generateObjectWithCID(t, cnr)
+ leftChild := testutil.GenerateObjectWithCID(cnr)
leftChild.InitRelations()
err = putBig(db, leftChild)
require.NoError(t, err)
- lock := generateObjectWithCID(t, cnr)
+ lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
err = putBig(db, lock)
require.NoError(t, err)
- parent := generateObjectWithCID(t, cnr)
+ parent := testutil.GenerateObjectWithCID(cnr)
- rightChild := generateObjectWithCID(t, cnr)
+ rightChild := testutil.GenerateObjectWithCID(cnr)
rightChild.SetParent(parent)
idParent, _ := parent.ID()
rightChild.SetParentID(idParent)
err = putBig(db, rightChild)
require.NoError(t, err)
- link := generateObjectWithCID(t, cnr)
+ link := testutil.GenerateObjectWithCID(cnr)
link.SetParent(parent)
link.SetParentID(idParent)
idLeftChild, _ := leftChild.ID()
@@ -208,7 +263,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
testSelect(t, db, cnr, fs,
object.AddressOf(small),
object.AddressOf(ts),
- object.AddressOf(sg),
object.AddressOf(leftChild),
object.AddressOf(rightChild),
object.AddressOf(link),
@@ -235,7 +289,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeRegular.String(), objectSDK.MatchStringNotEqual)
testSelect(t, db, cnr, fs,
object.AddressOf(ts),
- object.AddressOf(sg),
object.AddressOf(lock),
)
@@ -257,29 +310,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
object.AddressOf(rightChild),
object.AddressOf(link),
object.AddressOf(parent),
- object.AddressOf(sg),
- object.AddressOf(lock),
- )
-
- fs = objectSDK.SearchFilters{}
- fs.AddFilter(v2object.FilterHeaderObjectType, "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs)
- })
-
- t.Run("storage group objects", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeStorageGroup.String(), objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(sg))
-
- fs = objectSDK.SearchFilters{}
- fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeStorageGroup.String(), objectSDK.MatchStringNotEqual)
- testSelect(t, db, cnr, fs,
- object.AddressOf(small),
- object.AddressOf(leftChild),
- object.AddressOf(rightChild),
- object.AddressOf(link),
- object.AddressOf(parent),
- object.AddressOf(ts),
object.AddressOf(lock),
)
@@ -311,7 +341,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
testSelect(t, db, cnr, fs,
object.AddressOf(small),
object.AddressOf(ts),
- object.AddressOf(sg),
object.AddressOf(leftChild),
object.AddressOf(rightChild),
object.AddressOf(link),
@@ -322,15 +351,18 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
}
func TestDB_SelectInhume(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
- raw1 := generateObjectWithCID(t, cnr)
+ raw1 := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, raw1)
require.NoError(t, err)
- raw2 := generateObjectWithCID(t, cnr)
+ raw2 := testutil.GenerateObjectWithCID(cnr)
err = putBig(db, raw2)
require.NoError(t, err)
@@ -340,11 +372,7 @@ func TestDB_SelectInhume(t *testing.T) {
object.AddressOf(raw2),
)
- var tombstone oid.Address
- tombstone.SetContainer(cnr)
- tombstone.SetObject(oidtest.ID())
-
- err = metaInhume(db, object.AddressOf(raw2), tombstone)
+ err = metaInhume(db, object.AddressOf(raw2), oidtest.ID())
require.NoError(t, err)
fs = objectSDK.SearchFilters{}
@@ -354,15 +382,18 @@ func TestDB_SelectInhume(t *testing.T) {
}
func TestDB_SelectPayloadHash(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
- raw1 := generateObjectWithCID(t, cnr)
+ raw1 := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, raw1)
require.NoError(t, err)
- raw2 := generateObjectWithCID(t, cnr)
+ raw2 := testutil.GenerateObjectWithCID(cnr)
err = putBig(db, raw2)
require.NoError(t, err)
@@ -422,7 +453,10 @@ func TestDB_SelectPayloadHash(t *testing.T) {
}
func TestDB_SelectWithSlowFilters(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -433,14 +467,14 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
v21.SetMajor(2)
v21.SetMinor(1)
- raw1 := generateObjectWithCID(t, cnr)
+ raw1 := testutil.GenerateObjectWithCID(cnr)
raw1.SetPayloadSize(10)
raw1.SetCreationEpoch(11)
raw1.SetVersion(v20)
err := putBig(db, raw1)
require.NoError(t, err)
- raw2 := generateObjectWithCID(t, cnr)
+ raw2 := testutil.GenerateObjectWithCID(cnr)
raw2.SetPayloadSize(20)
raw2.SetCreationEpoch(21)
raw2.SetVersion(&v21)
@@ -527,15 +561,18 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
}
func TestDB_SelectObjectID(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
// prepare
- parent := generateObjectWithCID(t, cnr)
+ parent := testutil.GenerateObjectWithCID(cnr)
- regular := generateObjectWithCID(t, cnr)
+ regular := testutil.GenerateObjectWithCID(cnr)
idParent, _ := parent.ID()
regular.SetParentID(idParent)
regular.SetParent(parent)
@@ -543,23 +580,18 @@ func TestDB_SelectObjectID(t *testing.T) {
err := putBig(db, regular)
require.NoError(t, err)
- ts := generateObjectWithCID(t, cnr)
+ ts := testutil.GenerateObjectWithCID(cnr)
ts.SetType(objectSDK.TypeTombstone)
err = putBig(db, ts)
require.NoError(t, err)
- sg := generateObjectWithCID(t, cnr)
- sg.SetType(objectSDK.TypeStorageGroup)
- err = putBig(db, sg)
- require.NoError(t, err)
-
- lock := generateObjectWithCID(t, cnr)
+ lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
err = putBig(db, lock)
require.NoError(t, err)
t.Run("not found objects", func(t *testing.T) {
- raw := generateObjectWithCID(t, cnr)
+ raw := testutil.GenerateObjectWithCID(cnr)
id, _ := raw.ID()
@@ -574,7 +606,6 @@ func TestDB_SelectObjectID(t *testing.T) {
testSelect(t, db, cnr, fs,
object.AddressOf(regular),
object.AddressOf(parent),
- object.AddressOf(sg),
object.AddressOf(ts),
object.AddressOf(lock),
)
@@ -591,7 +622,6 @@ func TestDB_SelectObjectID(t *testing.T) {
fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id)
testSelect(t, db, cnr, fs,
object.AddressOf(parent),
- object.AddressOf(sg),
object.AddressOf(ts),
object.AddressOf(lock),
)
@@ -609,24 +639,6 @@ func TestDB_SelectObjectID(t *testing.T) {
testSelect(t, db, cnr, fs,
object.AddressOf(regular),
object.AddressOf(parent),
- object.AddressOf(sg),
- object.AddressOf(lock),
- )
- })
-
- t.Run("storage group objects", func(t *testing.T) {
- id, _ := sg.ID()
-
- fs := objectSDK.SearchFilters{}
- fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id)
- testSelect(t, db, cnr, fs, object.AddressOf(sg))
-
- fs = objectSDK.SearchFilters{}
- fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id)
- testSelect(t, db, cnr, fs,
- object.AddressOf(regular),
- object.AddressOf(parent),
- object.AddressOf(ts),
object.AddressOf(lock),
)
})
@@ -642,7 +654,6 @@ func TestDB_SelectObjectID(t *testing.T) {
fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id)
testSelect(t, db, cnr, fs,
object.AddressOf(regular),
- object.AddressOf(sg),
object.AddressOf(ts),
object.AddressOf(lock),
)
@@ -660,20 +671,394 @@ func TestDB_SelectObjectID(t *testing.T) {
testSelect(t, db, cnr, fs,
object.AddressOf(regular),
object.AddressOf(parent),
- object.AddressOf(sg),
object.AddressOf(ts),
)
})
}
-func TestDB_SelectSplitID(t *testing.T) {
+func TestDB_SelectOwnerID(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
- child1 := generateObjectWithCID(t, cnr)
- child2 := generateObjectWithCID(t, cnr)
- child3 := generateObjectWithCID(t, cnr)
+ // prepare
+
+ parent := testutil.GenerateObjectWithCID(cnr)
+
+ regular := testutil.GenerateObjectWithCID(cnr)
+ idParent, _ := parent.ID()
+ regular.SetParentID(idParent)
+ regular.SetParent(parent)
+
+ err := putBig(db, regular)
+ require.NoError(t, err)
+
+ ts := testutil.GenerateObjectWithCID(cnr)
+ ts.SetType(objectSDK.TypeTombstone)
+ err = putBig(db, ts)
+ require.NoError(t, err)
+
+ lock := testutil.GenerateObjectWithCID(cnr)
+ lock.SetType(objectSDK.TypeLock)
+ err = putBig(db, lock)
+ require.NoError(t, err)
+
+ t.Run("not found objects", func(t *testing.T) {
+ raw := testutil.GenerateObjectWithCID(cnr)
+
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, raw.OwnerID())
+
+ testSelect(t, db, cnr, fs)
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, raw.OwnerID())
+
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(parent),
+ object.AddressOf(ts),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("regular objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, regular.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(regular))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, regular.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(parent),
+ object.AddressOf(ts),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("tombstone objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, ts.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(ts))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, ts.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(parent),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("parent objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, parent.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(parent))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, parent.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(ts),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("lock objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, lock.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(lock))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, lock.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(parent),
+ object.AddressOf(ts),
+ )
+ })
+}
+
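+// TestDB_SelectECWithFastAndSlowFilters checks that a root (fast) filter combined
+// with a FilePath (slow) filter over EC chunks resolves to the EC parent address.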
+func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ ecChunk1 := oidtest.ID()
+ ecChunk2 := oidtest.ID()
+ ecParent := oidtest.ID()
+ var ecParentAddr oid.Address
+ ecParentAddr.SetContainer(cnr)
+ ecParentAddr.SetObject(ecParent)
+ var ecParentAttr []objectSDK.Attribute
+ var attr objectSDK.Attribute
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/1/2/3")
+ ecParentAttr = append(ecParentAttr, attr)
+
+ chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetID(ecChunk1)
+ chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
+ chunkObj.SetPayloadSize(uint64(5))
+ chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
+
+ chunkObj2 := testutil.GenerateObjectWithCID(cnr)
+ chunkObj2.SetID(ecChunk2)
+ chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObj2.SetPayloadSize(uint64(10))
+ chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0))
+
+ // put object with EC
+
+ var prm meta.PutPrm
+ prm.SetObject(chunkObj)
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetObject(chunkObj2)
+ _, err = db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ fs := objectSDK.SearchFilters{}
+ fs.AddRootFilter()
+ fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs, ecParentAddr)
+}
+
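+// testTarget is an ObjectWriter stub that collects every object produced by the
+// transformer.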
+type testTarget struct {
+ objects []*objectSDK.Object
+}
+
+func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error {
+ tt.objects = append(tt.objects, obj)
+ return nil
+}
+
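+// cutObject feeds size bytes of deterministic payload through the chunked
+// object writer and returns the resulting access identifiers.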
+func cutObject(t *testing.T, p transformer.ChunkedObjectWriter, hdr *objectSDK.Object, size int) *transformer.AccessIdentifiers {
+ ctx := context.Background()
+ require.NoError(t, p.WriteHeader(ctx, hdr))
+
+ payload := make([]byte, size)
+ rand.New(rand.NewSource(0)).Read(payload)
+
+ _, err := p.Write(ctx, payload)
+ require.NoError(t, err)
+
+ ids, err := p.Close(ctx)
+ require.NoError(t, err)
+ return ids
+}
+
+func TestDB_RawHead_SplitInfo(t *testing.T) {
+ t.Parallel()
+
+ const (
+ partSize = 10
+ partCount = 2
+ dataCount = 2
+ parityCount = 1
+ )
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ tt := new(testTarget)
+ p := transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: &pk.PrivateKey,
+ NextTargetInit: func() transformer.ObjectWriter { return tt },
+ NetworkState: epochState{e: 1},
+ MaxSize: partSize,
+ })
+
+ hdr := objectSDK.New()
+ hdr.SetContainerID(cnr)
+ hdr.SetOwnerID(usertest.ID())
+ ids := cutObject(t, p, hdr, partSize*partCount)
+ require.Equal(t, len(tt.objects), partCount+1)
+
+ t.Run("rep", func(t *testing.T) {
+ testGetRawSplitInfo(t, cnr, ids, tt.objects[partCount], tt.objects[partCount-1])
+ })
+ t.Run("with ec", func(t *testing.T) {
+ ec, err := erasurecode.NewConstructor(dataCount, parityCount)
+ require.NoError(t, err)
+
+ cs, err := ec.Split(tt.objects[partCount-1], &pk.PrivateKey)
+ require.NoError(t, err)
+
+ testGetRawSplitInfo(t, cnr, ids, tt.objects[partCount], cs[0])
+ })
+}
+
+func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIdentifiers, linking, lastPart *objectSDK.Object) {
+ expectedLinkID, ok := linking.ID()
+ require.True(t, ok)
+
+ t.Run("first last, then linking", func(t *testing.T) {
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ require.NoError(t, metaPut(db, lastPart, nil))
+ require.NoError(t, metaPut(db, linking, nil))
+
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(*ids.ParentID)
+
+ _, err := metaGet(db, addr, true)
+
+ var siErr *objectSDK.SplitInfoError
+ require.ErrorAs(t, err, &siErr)
+
+ lastID, ok := siErr.SplitInfo().LastPart()
+ require.True(t, ok)
+ require.Equal(t, ids.SelfID, lastID)
+
+ linkID, ok := siErr.SplitInfo().Link()
+ require.True(t, ok)
+ require.Equal(t, expectedLinkID, linkID)
+ })
+ t.Run("first linking, then last", func(t *testing.T) {
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ require.NoError(t, metaPut(db, linking, nil))
+ require.NoError(t, metaPut(db, lastPart, nil))
+
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(*ids.ParentID)
+
+ _, err := metaGet(db, addr, true)
+
+ var siErr *objectSDK.SplitInfoError
+ require.ErrorAs(t, err, &siErr)
+
+ lastID, ok := siErr.SplitInfo().LastPart()
+ require.True(t, ok)
+ require.Equal(t, ids.SelfID, lastID)
+
+ linkID, ok := siErr.SplitInfo().Link()
+ require.True(t, ok)
+ require.Equal(t, expectedLinkID, linkID)
+ })
+ t.Run("only last part", func(t *testing.T) {
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ require.NoError(t, metaPut(db, lastPart, nil))
+
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(*ids.ParentID)
+
+ _, err := metaGet(db, addr, true)
+
+ var siErr *objectSDK.SplitInfoError
+ require.ErrorAs(t, err, &siErr)
+
+ lastPart, ok := siErr.SplitInfo().LastPart()
+ require.True(t, ok)
+ require.Equal(t, ids.SelfID, lastPart)
+ })
+}
+
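+// TestDB_SelectSplitID_EC verifies that split ID filters keep working when the
+// split parts are stored as EC chunks.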
+func TestDB_SelectSplitID_EC(t *testing.T) {
+ t.Parallel()
+
+ const (
+ partSize = 10
+ partCount = 2
+ dataCount = 2
+ parityCount = 1
+ )
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ tt := new(testTarget)
+ p := transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: &pk.PrivateKey,
+ NextTargetInit: func() transformer.ObjectWriter { return tt },
+ NetworkState: epochState{e: 1},
+ MaxSize: partSize,
+ })
+
+ hdr := objectSDK.New()
+ hdr.SetContainerID(cnr)
+ hdr.SetOwnerID(usertest.ID())
+ cutObject(t, p, hdr, partSize*partCount)
+ require.Equal(t, len(tt.objects), partCount+1)
+
+ split := tt.objects[0].SplitID()
+ require.NotNil(t, split)
+
+ ec, err := erasurecode.NewConstructor(dataCount, parityCount)
+ require.NoError(t, err)
+
+ for i := range partCount {
+ cs, err := ec.Split(tt.objects[i], &pk.PrivateKey)
+ require.NoError(t, err)
+
+ require.NoError(t, putBig(db, cs[0]))
+ }
+
+ t.Run("not present", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchNotPresent)
+ testSelect(t, db, cnr, fs)
+ })
+
+ t.Run("split id", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddFilter(v2object.FilterHeaderSplitID, split.String(), objectSDK.MatchStringEqual)
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(tt.objects[0]),
+ object.AddressOf(tt.objects[1]),
+ )
+ })
+
+ t.Run("empty split", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchStringEqual)
+ testSelect(t, db, cnr, fs)
+ })
+
+ t.Run("unknown split id", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddFilter(v2object.FilterHeaderSplitID,
+ objectSDK.NewSplitID().String(),
+ objectSDK.MatchStringEqual)
+ testSelect(t, db, cnr, fs)
+ })
+}
+
+func TestDB_SelectSplitID(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+
+ child1 := testutil.GenerateObjectWithCID(cnr)
+ child2 := testutil.GenerateObjectWithCID(cnr)
+ child3 := testutil.GenerateObjectWithCID(cnr)
split1 := objectSDK.NewSplitID()
split2 := objectSDK.NewSplitID()
@@ -721,15 +1106,18 @@ func TestDB_SelectSplitID(t *testing.T) {
}
func TestDB_SelectContainerID(t *testing.T) {
+ t.Parallel()
+
db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
- obj1 := generateObjectWithCID(t, cnr)
+ obj1 := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, obj1)
require.NoError(t, err)
- obj2 := generateObjectWithCID(t, cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
err = putBig(db, obj2)
require.NoError(t, err)
@@ -769,13 +1157,15 @@ func TestDB_SelectContainerID(t *testing.T) {
func BenchmarkSelect(b *testing.B) {
const objCount = 1000
db := newDB(b)
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
+
cid := cidtest.ID()
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
var attr objectSDK.Attribute
attr.SetKey("myHeader")
attr.SetValue(strconv.Itoa(i))
- obj := generateObjectWithCID(b, cid)
+ obj := testutil.GenerateObjectWithCID(cid)
obj.SetAttributes(attr)
require.NoError(b, metaPut(db, obj, nil))
}
@@ -806,29 +1196,34 @@ func BenchmarkSelect(b *testing.B) {
}
func TestExpiredObjects(t *testing.T) {
+ t.Parallel()
+
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
cidExp, _ := exp.ContainerID()
cidNonExp, _ := nonExp.ContainerID()
- objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{})
+ objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.Empty(t, objs) // expired object should not be returned
- objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{})
+ objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.NotEmpty(t, objs)
})
}
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) {
+ b.ReportAllocs()
+
var prm meta.SelectPrm
prm.SetContainerID(cid)
prm.SetFilters(fs)
- for i := 0; i < b.N; i++ {
- res, err := db.Select(prm)
+ for range b.N {
+ res, err := db.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}
@@ -838,11 +1233,12 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
}
}
-func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) {
var prm meta.SelectPrm
prm.SetFilters(fs)
prm.SetContainerID(cnr)
+ prm.SetUseAttributeIndex(useAttributeIndex)
- res, err := db.Select(prm)
+ res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
}
diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go
index fac8a079f3..72618b1a0c 100644
--- a/pkg/local_object_storage/metabase/shard_id.go
+++ b/pkg/local_object_storage/metabase/shard_id.go
@@ -1,7 +1,14 @@
package meta
import (
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ metamode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"go.etcd.io/bbolt"
)
@@ -10,43 +17,81 @@ var (
shardIDKey = []byte("id")
)
-// ReadShardID reads shard id from db.
+// GetShardID sets metabase operation mode
+// and reads shard id from db.
// If id is missing, returns nil, nil.
-func (db *DB) ReadShardID() ([]byte, error) {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
+//
+// GetShardID does not report any metrics.
+func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) {
+ db.modeMtx.Lock()
+ defer db.modeMtx.Unlock()
+ db.mode = mode
- if db.mode.NoMetabase() {
- return nil, ErrDegradedMode
+ if _, err := os.Stat(db.info.Path); errors.Is(err, os.ErrNotExist) {
+ return nil, nil
}
+ if err := db.openDB(ctx, mode); err != nil {
+ return nil, fmt.Errorf("open metabase: %w", err)
+ }
+
+ id, err := db.readShardID()
+
+ if cErr := db.close(); cErr != nil {
+ err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+ }
+
+ return id, metaerr.Wrap(err)
+}
+
+// ReadShardID reads shard id from db.
+// If id is missing, returns nil, nil.
+func (db *DB) readShardID() ([]byte, error) {
var id []byte
err := db.boltDB.View(func(tx *bbolt.Tx) error {
b := tx.Bucket(shardInfoBucket)
if b != nil {
- id = slice.Copy(b.Get(shardIDKey))
+ id = bytes.Clone(b.Get(shardIDKey))
}
return nil
})
- return id, err
+ return id, metaerr.Wrap(err)
}
-// WriteShardID writes shard it to db.
-func (db *DB) WriteShardID(id []byte) error {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
+// SetShardID sets metabase operation mode
+// and writes shard id to db.
+func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error {
+ db.modeMtx.Lock()
+ defer db.modeMtx.Unlock()
+ db.mode = mode
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- } else if db.mode.ReadOnly() {
+ if mode.ReadOnly() {
return ErrReadOnlyMode
}
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
+ if err := db.openDB(ctx, mode); err != nil {
+ return fmt.Errorf("open metabase: %w", err)
+ }
+
+ err := db.writeShardID(id)
+ if err == nil {
+ db.metrics.SetMode(metamode.ConvertToComponentModeDegraded(mode))
+ }
+
+ if cErr := db.close(); cErr != nil {
+ err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+ }
+
+ return metaerr.Wrap(err)
+}
+
+// writeShardID writes shard id to db.
+func (db *DB) writeShardID(id []byte) error {
+ return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
return err
}
return b.Put(shardIDKey, id)
- })
+ }))
}
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
index ae309d4b2c..8f23765031 100644
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ b/pkg/local_object_storage/metabase/storage_id.go
@@ -1,11 +1,16 @@
package meta
import (
- "errors"
+ "bytes"
+ "context"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// StorageIDPrm groups the parameters of StorageID operation.
@@ -30,36 +35,50 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns storage descriptor for objects from the blobstor.
// It is stored together with the object and makes get/delete operations faster.
-func (db *DB) StorageID(prm StorageIDPrm) (res StorageIDRes, err error) {
+func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("StorageID", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
+ var res StorageIDRes
if db.mode.NoMetabase() {
return res, ErrDegradedMode
}
- err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.id, err = db.storageID(tx, prm.addr)
-
- return err
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ res.id = db.storageID(tx, prm.addr)
+ return nil
})
-
- return
+ success = err == nil
+ return res, metaerr.Wrap(err)
}
-func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) {
+func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte {
key := make([]byte, bucketKeySize)
smallBucket := tx.Bucket(smallBucketName(addr.Container(), key))
if smallBucket == nil {
- return nil, nil
+ return nil
}
storageID := smallBucket.Get(objectKey(addr.Object(), key))
if storageID == nil {
- return nil, nil
+ return nil
}
- return slice.Copy(storageID), nil
+ return bytes.Clone(storageID)
}
// UpdateStorageIDPrm groups the parameters of UpdateStorageID operation.
@@ -82,7 +101,22 @@ func (p *UpdateStorageIDPrm) SetStorageID(id []byte) {
}
// UpdateStorageID updates storage descriptor for objects from the blobstor.
-func (db *DB) UpdateStorageID(prm UpdateStorageIDPrm) (res UpdateStorageIDRes, err error) {
+func (db *DB) UpdateStorageID(ctx context.Context, prm UpdateStorageIDPrm) (res UpdateStorageIDRes, err error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("UpdateStorageID", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.UpdateStorageID",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.String("storage_id", string(prm.id)),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -92,16 +126,9 @@ func (db *DB) UpdateStorageID(prm UpdateStorageIDPrm) (res UpdateStorageIDRes, e
return res, ErrReadOnlyMode
}
- currEpoch := db.epochState.CurrentEpoch()
-
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
- exists, err := db.exists(tx, prm.addr, currEpoch)
- if err == nil && exists || errors.Is(err, ErrObjectIsExpired) {
- err = updateStorageID(tx, prm.addr, prm.id)
- }
-
- return err
+ return setStorageID(tx, prm.addr, prm.id, true)
})
-
- return
+ success = err == nil
+ return res, metaerr.Wrap(err)
}
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index 8ab61a13d1..fef680159a 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -1,19 +1,25 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
)
func TestDB_StorageID(t *testing.T) {
- db := newDB(t)
+ t.Parallel()
- raw1 := generateObject(t)
- raw2 := generateObject(t)
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ raw1 := testutil.GenerateObject()
+ raw2 := testutil.GenerateObject()
+ deleted := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
@@ -30,6 +36,15 @@ func TestDB_StorageID(t *testing.T) {
err = putBig(db, raw2)
require.NoError(t, err)
+ // put object with storageID and delete it
+ err = metaPut(db, deleted, storageID)
+ require.NoError(t, err)
+
+ cnrID, ok := deleted.ContainerID()
+ require.True(t, ok)
+ ts := testutil.GenerateObjectWithCID(cnrID)
+ require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object()))
+
// check StorageID for object without storageID
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
require.NoError(t, err)
@@ -40,21 +55,58 @@ func TestDB_StorageID(t *testing.T) {
require.NoError(t, err)
require.Equal(t, storageID, fetchedStorageID)
+ // check StorageID for deleted object with storageID
+ fetchedStorageID, err = metaStorageID(db, object.AddressOf(deleted))
+ require.NoError(t, err)
+ require.Equal(t, storageID, fetchedStorageID)
+
t.Run("update", func(t *testing.T) {
+ storageID := []byte{1, 2, 3, 4, 5}
require.NoError(t, metaUpdateStorageID(db, object.AddressOf(raw2), storageID))
+ require.NoError(t, metaUpdateStorageID(db, object.AddressOf(deleted), storageID))
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
require.NoError(t, err)
require.Equal(t, storageID, fetchedStorageID)
+
+ fetchedStorageID, err = metaStorageID(db, object.AddressOf(deleted))
+ require.NoError(t, err)
+ require.Equal(t, storageID, fetchedStorageID)
})
}
+func TestPutWritecacheDataRace(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
+
+ putStorageID := []byte{1, 2, 3}
+ wcStorageID := []byte{1, 2, 3, 4, 5}
+ o := testutil.GenerateObject()
+
+ fetchedStorageID, err := metaStorageID(db, object.AddressOf(o))
+ require.NoError(t, err)
+ require.Nil(t, fetchedStorageID)
+
+ // the writecache flushes the object and updates its storageID before the object is actually saved to the metabase
+ require.NoError(t, metaUpdateStorageID(db, object.AddressOf(o), wcStorageID))
+
+ // Put supplies its own storageID, but the one set by the writecache must be kept
+ err = metaPut(db, o, putStorageID)
+ require.NoError(t, err)
+
+ fetchedStorageID, err = metaStorageID(db, object.AddressOf(o))
+ require.NoError(t, err)
+ require.Equal(t, wcStorageID, fetchedStorageID)
+}
+
func metaUpdateStorageID(db *meta.DB, addr oid.Address, id []byte) error {
var sidPrm meta.UpdateStorageIDPrm
sidPrm.SetAddress(addr)
sidPrm.SetStorageID(id)
- _, err := db.UpdateStorageID(sidPrm)
+ _, err := db.UpdateStorageID(context.Background(), sidPrm)
return err
}
@@ -62,6 +114,6 @@ func metaStorageID(db *meta.DB, addr oid.Address) ([]byte, error) {
var sidPrm meta.StorageIDPrm
sidPrm.SetAddress(addr)
- r, err := db.StorageID(sidPrm)
+ r, err := db.StorageID(context.Background(), sidPrm)
return r.StorageID(), err
}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
new file mode 100644
index 0000000000..4948f34241
--- /dev/null
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -0,0 +1,602 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "sync/atomic"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ upgradeLogFrequency = 50_000
+ upgradeWorkersCount = 1_000
+ compactMaxTxSize = 256 << 20
+ upgradeTimeout = 1 * time.Second
+)
+
+var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{
+ 2: upgradeFromV2ToV3,
+ 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error {
+ log("metabase already upgraded")
+ return nil
+ },
+}
+
+func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error {
+ if _, err := os.Stat(path); err != nil {
+ return fmt.Errorf("check metabase existence: %w", err)
+ }
+ opts := bbolt.DefaultOptions
+ opts.Timeout = upgradeTimeout
+ db, err := bbolt.Open(path, os.ModePerm, opts)
+ if err != nil {
+ return fmt.Errorf("open metabase: %w", err)
+ }
+ var version uint64
+ if err := db.View(func(tx *bbolt.Tx) error {
+ var e error
+ version, e = currentVersion(tx)
+ return e
+ }); err != nil {
+ return err
+ }
+ updater, found := updates[version]
+ if !found {
+ return fmt.Errorf("unsupported version %d: no update available", version)
+ }
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(shardInfoBucket)
+ return b.Put(upgradeKey, zeroValue)
+ }); err != nil {
+ return fmt.Errorf("set upgrade key %w", err)
+ }
+ if err := updater(ctx, db, cs, log); err != nil {
+ return fmt.Errorf("update metabase schema: %w", err)
+ }
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(shardInfoBucket)
+ return b.Delete(upgradeKey)
+ }); err != nil {
+ return fmt.Errorf("delete upgrade key %w", err)
+ }
+ if compact {
+ log("compacting metabase...")
+ err := compactDB(db)
+ if err != nil {
+ return fmt.Errorf("compact metabase: %w", err)
+ }
+ log("metabase compacted")
+ }
+ return db.Close()
+}
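
Upgrade follows a deliberate guard protocol: read the stored version, mark the metabase with an upgrade key, run the migration, and clear the key only on success, so an interrupted run is detected by checkVersion later. A plausible call site, e.g. from an administrative command — everything except meta.Upgrade's signature is illustrative:

```go
// Illustrative only; the path, provider and logger are placeholders.
logProgress := func(a ...any) { fmt.Println(a...) }
if err := meta.Upgrade(ctx, "/srv/frostfs/meta/shard-0/meta.db", true /* compact */, infoProvider, logProgress); err != nil {
	return fmt.Errorf("upgrade shard metabase: %w", err)
}
```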
+
+func compactDB(db *bbolt.DB) error {
+ sourcePath := db.Path()
+ tmpFileName := sourcePath + "." + time.Now().Format(time.RFC3339)
+ f, err := os.Stat(sourcePath)
+ if err != nil {
+ return err
+ }
+ dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{
+ Timeout: 100 * time.Millisecond,
+ NoSync: true,
+ })
+ if err != nil {
+ return fmt.Errorf("open new metabase to compact: %w", err)
+ }
+ if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
+ return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
+ }
+ if err := dst.Sync(); err != nil {
+ return fmt.Errorf("sync compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ if err := dst.Close(); err != nil {
+ return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close source metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ if err := os.Rename(tmpFileName, sourcePath); err != nil {
+ return fmt.Errorf("replace source metabase with compacted: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ return nil
+}
+
+func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
+ if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
+ return err
+ }
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return dropUserAttributes(ctx, db, cs, log)
+ })
+ eg.Go(func() error {
+ return dropOwnerIDIndex(ctx, db, log)
+ })
+ eg.Go(func() error {
+ return dropPayloadChecksumIndex(ctx, db, log)
+ })
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+ return db.Update(func(tx *bbolt.Tx) error {
+ return updateVersion(tx, version)
+ })
+}
+
+type objectIDToExpEpoch struct {
+ containerID cid.ID
+ objectID oid.ID
+ expirationEpoch uint64
+}
+
+func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ log("filling expiration epoch buckets...")
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ _, err := tx.CreateBucketIfNotExists(expEpochToObjectBucketName)
+ return err
+ }); err != nil {
+ return err
+ }
+ objects := make(chan objectIDToExpEpoch)
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return selectObjectsWithExpirationEpoch(ctx, db, objects)
+ })
+ var count atomic.Uint64
+ for range upgradeWorkersCount {
+ eg.Go(func() error {
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case obj, ok := <-objects:
+ if !ok {
+ return nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ if err := putUniqueIndexItem(tx, namedBucketItem{
+ name: expEpochToObjectBucketName,
+ key: expirationEpochKey(obj.expirationEpoch, obj.containerID, obj.objectID),
+ val: zeroValue,
+ }); err != nil {
+ return err
+ }
+ val := make([]byte, epochSize)
+ binary.LittleEndian.PutUint64(val, obj.expirationEpoch)
+ return putUniqueIndexItem(tx, namedBucketItem{
+ name: objectToExpirationEpochBucketName(obj.containerID, make([]byte, bucketKeySize)),
+ key: objectKey(obj.objectID, make([]byte, objectKeySize)),
+ val: val,
+ })
+ }); err != nil {
+ return err
+ }
+ }
+ if c := count.Add(1); c%upgradeLogFrequency == 0 {
+ log("expiration epoch filled for", c, "objects...")
+ }
+ }
+ })
+ }
+ err := eg.Wait()
+ if err != nil {
+ log("expiration epoch buckets completed completed with error:", err)
+ return err
+ }
+ log("filling expiration epoch buckets completed successfully, total", count.Load(), "objects")
+ return nil
+}
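
createExpirationEpochBuckets pairs one producer goroutine with upgradeWorkersCount consumers over an unbuffered channel, all tied to a single errgroup so the first failure cancels everything. A generic sketch of the same fan-out shape — names here are illustrative, not part of the patch:

```go
// fanOut runs one producer and `workers` consumers under one errgroup.
// The producer is expected to close the channel when it is done.
func fanOut[T any](ctx context.Context, produce func(context.Context, chan<- T) error, consume func(T) error, workers int) error {
	items := make(chan T)
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error { return produce(ctx, items) })
	for range workers {
		eg.Go(func() error {
			for {
				select {
				case <-ctx.Done():
					return ctx.Err()
				case it, ok := <-items:
					if !ok {
						return nil // channel drained and closed
					}
					if err := consume(it); err != nil {
						return err
					}
				}
			}
		})
	}
	return eg.Wait()
}
```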
+
+func selectObjectsWithExpirationEpoch(ctx context.Context, db *bbolt.DB, objects chan objectIDToExpEpoch) error {
+ defer close(objects)
+
+ const batchSize = 1000
+ it := &objectsWithExpirationEpochBatchIterator{
+ lastAttributeKey: usrAttrPrefix,
+ }
+ for {
+ if err := getNextObjectsWithExpirationEpochBatch(ctx, db, it, batchSize); err != nil {
+ return err
+ }
+ for _, item := range it.items {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case objects <- item:
+ }
+ }
+
+ if len(it.items) < batchSize {
+ return nil
+ }
+ it.items = nil
+ }
+}
+
+var (
+ usrAttrPrefix = []byte{userAttributePrefix}
+ errBatchSizeLimit = errors.New("batch size limit")
+)
+
+type objectsWithExpirationEpochBatchIterator struct {
+ lastAttributeKey []byte
+ lastAttributeValue []byte
+ lastAttrKeyValueItem []byte
+ items []objectIDToExpEpoch
+}
+
+// - {prefix}{containerID}{attributeKey} <- bucket
+// -- {attributeValue} <- bucket, expirationEpoch
+// --- {objectID}: zeroValue <- record
+
+func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, it *objectsWithExpirationEpochBatchIterator, batchSize int) error {
+ seekAttrValue := it.lastAttributeValue
+ seekAttrKVItem := it.lastAttrKeyValueItem
+ err := db.View(func(tx *bbolt.Tx) error {
+ attrKeyC := tx.Cursor()
+ for attrKey, _ := attrKeyC.Seek(it.lastAttributeKey); attrKey != nil && bytes.HasPrefix(attrKey, usrAttrPrefix); attrKey, _ = attrKeyC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if len(attrKey) <= 1+cidSize {
+ continue
+ }
+ attributeKey := string(attrKey[1+cidSize:])
+ if attributeKey != objectV2.SysAttributeExpEpoch {
+ continue
+ }
+ var containerID cid.ID
+ if err := containerID.Decode(attrKey[1 : 1+cidSize]); err != nil {
+ return fmt.Errorf("decode container id from user attribute bucket: %w", err)
+ }
+ if err := iterateExpirationAttributeKeyBucket(ctx, tx.Bucket(attrKey), it, batchSize, containerID, attrKey, seekAttrValue, seekAttrKVItem); err != nil {
+ return err
+ }
+ seekAttrValue = nil
+ seekAttrKVItem = nil
+ }
+ return nil
+ })
+ if err != nil && !errors.Is(err, errBatchSizeLimit) {
+ return err
+ }
+ return nil
+}
+
+func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, it *objectsWithExpirationEpochBatchIterator, batchSize int, containerID cid.ID, attrKey, seekAttrValue, seekAttrKVItem []byte) error {
+ attrValueC := b.Cursor()
+ for attrValue, v := attrValueC.Seek(seekAttrValue); attrValue != nil; attrValue, v = attrValueC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if v != nil {
+ continue // need to iterate over buckets, not records
+ }
+ expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
+ if err != nil {
+ return fmt.Errorf("parse expiration epoch: %w", err)
+ }
+ expirationEpochBucket := b.Bucket(attrValue)
+ attrKeyValueC := expirationEpochBucket.Cursor()
+ for attrKeyValueItem, v := attrKeyValueC.Seek(seekAttrKVItem); attrKeyValueItem != nil; attrKeyValueItem, v = attrKeyValueC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if v == nil {
+ continue // need to iterate over records, not buckets
+ }
+ if bytes.Equal(it.lastAttributeKey, attrKey) && bytes.Equal(it.lastAttributeValue, attrValue) && bytes.Equal(it.lastAttrKeyValueItem, attrKeyValueItem) {
+ continue
+ }
+ var objectID oid.ID
+ if err := objectID.Decode(attrKeyValueItem); err != nil {
+ return fmt.Errorf("decode object id from container '%s' expiration epoch %d: %w", containerID, expirationEpoch, err)
+ }
+ it.lastAttributeKey = bytes.Clone(attrKey)
+ it.lastAttributeValue = bytes.Clone(attrValue)
+ it.lastAttrKeyValueItem = bytes.Clone(attrKeyValueItem)
+ it.items = append(it.items, objectIDToExpEpoch{
+ containerID: containerID,
+ objectID: objectID,
+ expirationEpoch: expirationEpoch,
+ })
+ if len(it.items) == batchSize {
+ return errBatchSizeLimit
+ }
+ }
+ seekAttrKVItem = nil
+ }
+ return nil
+}
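
The iterator above survives across read transactions by remembering the last attribute key, value and item it emitted, and it uses the errBatchSizeLimit sentinel to abort a View once a batch fills up. A condensed, single-bucket sketch of the same resume-from-cursor pattern; the helper name is illustrative:

```go
var errStop = errors.New("batch full") // sentinel, like errBatchSizeLimit

// nextBatch is an illustrative helper, not part of the patch.
func nextBatch(db *bbolt.DB, bucket []byte, last *[]byte, size int) ([][]byte, error) {
	var out [][]byte
	err := db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return nil
		}
		c := b.Cursor()
		for k, _ := c.Seek(*last); k != nil; k, _ = c.Next() {
			if bytes.Equal(k, *last) {
				continue // emitted by the previous batch already
			}
			*last = bytes.Clone(k)
			out = append(out, *last)
			if len(out) == size {
				return errStop // abort the read tx, resume later
			}
		}
		return nil
	})
	if err != nil && !errors.Is(err, errStop) {
		return nil, err
	}
	return out, nil
}
```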
+
+func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
+ log("deleting user attribute buckets...")
+ const batch = 1000
+ prefix := []byte{userAttributePrefix}
+ last := prefix
+ var count uint64
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ var keys [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
+ if bytes.Equal(last, k) {
+ continue
+ }
+ keys = append(keys, bytes.Clone(k))
+ }
+ return nil
+ }); err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ if len(keys) == 0 {
+ log("deleting user attribute buckets completed successfully, deleted", count, "buckets")
+ return nil
+ }
+ last = keys[len(keys)-1]
+ cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys)
+ if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ count += cnt
+ cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys)
+ if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ count += cnt
+ log("deleted", count, "user attribute buckets")
+ }
+}
+
+func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
+ keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs)
+ if err != nil {
+ return 0, fmt.Errorf("select non indexed user attributes: %w", err)
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, k := range keysToDrop {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop non indexed user attributes: %w", err)
+ }
+ return uint64(len(keysToDrop)), nil
+}
+
+func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
+ var keysToDrop [][]byte
+ for _, key := range keys {
+ attr, ok := attributeFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ if !IsAtrributeIndexed(attr) {
+ keysToDrop = append(keysToDrop, key)
+ continue
+ }
+ contID, ok := cidFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ info, err := cs.Info(ctx, contID)
+ if err != nil {
+ return nil, err
+ }
+ if info.Removed || !info.Indexed {
+ keysToDrop = append(keysToDrop, key)
+ }
+ }
+ return keysToDrop, nil
+}
+
+func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) {
+ var dropBuckets [][]byte
+ for _, key := range keys {
+ select {
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ default:
+ }
+
+ if err := dropEmptyNestedBuckets(ctx, db, key); err != nil {
+ return 0, err
+ }
+
+ empty, exists, err := bucketIsEmpty(db, key)
+ if err != nil {
+ return 0, err
+ }
+ if empty && exists {
+ dropBuckets = append(dropBuckets, key)
+ }
+ }
+ if len(dropBuckets) == 0 {
+ return 0, nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, key := range dropBuckets {
+ if err := tx.DeleteBucket(key); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop empty user attributes buckets: %w", err)
+ }
+ return uint64(len(dropBuckets)), nil
+}
+
+func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) {
+ var empty bool
+ var exists bool
+ if err := db.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(bucketKey)
+ if b == nil {
+ return nil
+ }
+ exists = true
+ empty = !hasAnyItem(b)
+ return nil
+ }); err != nil {
+ return false, false, fmt.Errorf("bucket empty check: %w", err)
+ }
+ return empty, exists, nil
+}
+
+func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error {
+ var last []byte
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var dropBuckets [][]byte
+ var err error
+ dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last)
+ if err != nil {
+ return fmt.Errorf("select empty nested buckets: %w", err)
+ }
+ if len(dropBuckets) == 0 {
+ return nil
+ }
+
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ for _, sb := range dropBuckets {
+ if err := rootBucket.DeleteBucket(sb); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("drop empty nested buckets: %w", err)
+ }
+ }
+}
+
+func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) {
+ const batchSize = 1000
+ var result [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ c := rootBucket.Cursor()
+ for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if bytes.Equal(last, k) {
+ continue
+ }
+ last = bytes.Clone(k)
+ if v != nil { // record
+ continue
+ }
+ nestedBucket := rootBucket.Bucket(k)
+ if nestedBucket == nil {
+ continue
+ }
+ if !hasAnyItem(nestedBucket) {
+ result = append(result, bytes.Clone(k))
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, nil, err
+ }
+ return result, last, nil
+}
+
+func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) {
+ log(append([]any{"owner ID index:"}, a...)...)
+ })
+}
+
+func dropPayloadChecksumIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{payloadHashPrefix}, func(a ...any) {
+ log(append([]any{"payload checksum:"}, a...)...)
+ })
+}
+
+func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log func(a ...any)) error {
+ log("deleting buckets...")
+ const batch = 1000
+ var count uint64
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ var keys [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
+ keys = append(keys, bytes.Clone(k))
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ if len(keys) == 0 {
+ log("deleting buckets completed successfully, deleted", count, "buckets")
+ return nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, k := range keys {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ count += uint64(len(keys))
+ log("deleted", count, "buckets")
+ }
+}
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
new file mode 100644
index 0000000000..c90de4dd6e
--- /dev/null
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -0,0 +1,222 @@
+//go:build integration
+
+package meta
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+const upgradeFilePath = "/path/to/metabase.v2"
+
+func TestUpgradeV2ToV3(t *testing.T) {
+ path := createTempCopy(t, upgradeFilePath)
+ defer func() {
+ require.NoError(t, os.Remove(path))
+ }()
+ db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion)
+ require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
+}
+
+type testContainerInfoProvider struct{}
+
+func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) {
+ return container.Info{}, nil
+}
+
+func createTempCopy(t *testing.T, path string) string {
+ src, err := os.Open(path)
+ require.NoError(t, err)
+
+ tmpPath := path + time.Now().Format(time.RFC3339)
+ dest, err := os.Create(tmpPath)
+ require.NoError(t, err)
+
+ _, err = io.Copy(dest, src)
+ require.NoError(t, err)
+
+ require.NoError(t, src.Close())
+ require.NoError(t, dest.Close())
+
+ return tmpPath
+}
+
+func TestGenerateMetabaseFile(t *testing.T) {
+ t.Skip("for generating db")
+ const (
+ containersCount = 10_000
+ simpleObjectsCount = 500_000
+ complexObjectsCount = 500_000 // x2
+ deletedByGCMarksCount = 100_000
+ deletedByTombstoneCount = 100_000 // x2
+ lockedCount = 100_000 // x2
+
+ allocSize = 128 << 20
+ generateWorkersCount = 1_000
+ minEpoch = 1_000
+ maxFilename = 1_000
+ maxStorageID = 10_000
+ )
+
+ db := New(WithPath(upgradeFilePath), WithEpochState(epochState{e: minEpoch}), WithLogger(test.NewLogger(t)))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ db.boltDB.AllocSize = allocSize
+ db.boltDB.NoSync = true
+ require.NoError(t, db.Init(context.Background()))
+ containers := make([]cid.ID, containersCount)
+ for i := range containers {
+ containers[i] = cidtest.ID()
+ }
+ oc, err := db.ObjectCounters()
+ require.NoError(t, err)
+ require.True(t, oc.IsZero())
+ eg, ctx := errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects
+ for i := range simpleObjectsCount {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info(ctx, "simple objects generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // complex objects
+ for i := range complexObjectsCount {
+ i := i
+ eg.Go(func() error {
+ parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ child := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ child.SetParent(parent)
+ idParent, _ := parent.ID()
+ child.SetParentID(idParent)
+ testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: child,
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info(ctx, "complex objects generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects deleted by gc marks
+ for i := range deletedByGCMarksCount {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ _, err = db.Inhume(ctx, InhumePrm{
+ target: []oid.Address{object.AddressOf(obj)},
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info(ctx, "simple objects deleted by gc marks generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(10000)
+ // simple objects deleted by tombstones
+ for i := range deletedByTombstoneCount {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ tomb := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ tomb.SetType(objectSDK.TypeTombstone)
+ _, err = db.Put(ctx, PutPrm{
+ obj: tomb,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ tombAddr := object.AddressOf(tomb)
+ _, err = db.Inhume(ctx, InhumePrm{
+ target: []oid.Address{object.AddressOf(obj)},
+ tomb: &tombAddr,
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info(ctx, "simple objects deleted by tombstones generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects locked by locks
+ for i := range lockedCount {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ lock := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ lock.SetType(objectSDK.TypeLock)
+ testutil.AddAttribute(lock, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err = db.Put(ctx, PutPrm{
+ obj: lock,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ err = db.Lock(ctx, containers[i%len(containers)], object.AddressOf(lock).Object(), []oid.ID{object.AddressOf(obj).Object()})
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info(ctx, "simple objects locked by locks generated")
+ require.NoError(t, db.boltDB.Sync())
+ require.NoError(t, db.Close(context.Background()))
+}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index b60c97fd7f..4ad83332bf 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -1,12 +1,14 @@
package meta
import (
- "bytes"
"crypto/sha256"
+ "encoding/binary"
+ "errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -19,11 +21,15 @@ var (
graveyardBucketName = []byte{graveyardPrefix}
// garbageBucketName stores rows with the objects that should be physically
// deleted by the node (Garbage Collector routine).
- garbageBucketName = []byte{garbagePrefix}
- toMoveItBucketName = []byte{toMoveItPrefix}
- containerVolumeBucketName = []byte{containerVolumePrefix}
+ garbageBucketName = []byte{garbagePrefix}
+ toMoveItBucketName = []byte{toMoveItPrefix}
+ containerVolumeBucketName = []byte{containerVolumePrefix}
+ containerCounterBucketName = []byte{containerCountersPrefix}
+ expEpochToObjectBucketName = []byte{expirationEpochToObjectPrefix}
zeroValue = []byte{0xFF}
+
+ errInvalidLength = errors.New("invalid length")
)
// Prefix bytes for database keys. All ids and addresses are encoded in binary
@@ -65,10 +71,10 @@ const (
// Key: object ID
// Value: marshalled object
lockersPrefix
- // storageGroupPrefix is used for prefixing buckets containing objects of STORAGEGROUP type.
+ // _ is unused. Previous usage was for prefixing buckets containing objects of STORAGEGROUP type.
// Key: object ID
// Value: marshaled object
- storageGroupPrefix
+ _
// tombstonePrefix is used for prefixing buckets containing objects of TOMBSTONE type.
// Key: object ID
// Value: marshaled object
@@ -86,11 +92,12 @@ const (
// FKBT index buckets.
// ====================
- // ownerPrefix is used for prefixing FKBT index buckets mapping owner to object IDs.
+ // ownerPrefix was used for prefixing FKBT index buckets mapping owner to object IDs.
// Key: owner ID
// Value: bucket containing object IDs as keys
+ // removed in version 3
ownerPrefix
- // userAttributePrefix is used for prefixing FKBT index buckets containing objects.
+ // userAttributePrefix was used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
userAttributePrefix
@@ -99,9 +106,10 @@ const (
// List index buckets.
// ====================
- // payloadHashPrefix is used for prefixing List index buckets mapping payload hash to a list of object IDs.
+ // payloadHashPrefix was used for prefixing List index buckets mapping payload hash to a list of object IDs.
// Key: payload hash
// Value: list of object IDs
+ // removed in version 3
payloadHashPrefix
// parentPrefix is used for prefixing List index buckets mapping parent ID to a list of children IDs.
// Key: parent ID
@@ -111,6 +119,26 @@ const (
// Key: split ID
// Value: list of object IDs
splitPrefix
+
+ // containerCountersPrefix is used for storing container object counters.
+ // Key: container ID + type
+ // Value: object count as little-endian uint64
+ containerCountersPrefix
+
+ // ecInfoPrefix is used for storing the relation between an EC parent ID and its chunk IDs.
+ // Key: container ID + type
+ // Value: Object id
+ ecInfoPrefix
+
+ // expirationEpochToObjectPrefix is used for storing the relation between an expiration epoch and an object ID.
+ // Key: expiration epoch + object address
+ // Value: zero
+ expirationEpochToObjectPrefix
+
+ // objectToExpirationEpochPrefix is used for storing the relation between an object ID and its expiration epoch.
+ // Key: object address
+ // Value: expiration epoch
+ objectToExpirationEpochPrefix
)
const (
@@ -118,10 +146,9 @@ const (
bucketKeySize = 1 + cidSize
objectKeySize = sha256.Size
addressKeySize = cidSize + objectKeySize
+ epochSize = 8
)
-var splitInfoError *object.SplitInfoError // for errors.As comparisons
-
func bucketName(cnr cid.ID, prefix byte, key []byte) []byte {
key[0] = prefix
cnr.Encode(key[1:])
@@ -138,35 +165,31 @@ func tombstoneBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, tombstonePrefix, key)
}
-// storageGroupBucketName returns _SG.
-func storageGroupBucketName(cnr cid.ID, key []byte) []byte {
- return bucketName(cnr, storageGroupPrefix, key)
-}
-
// smallBucketName returns _small.
func smallBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, smallPrefix, key)
}
-// attributeBucketName returns _attr_.
+// attributeBucketName returns _.
func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
key[0] = userAttributePrefix
cnr.Encode(key[1:])
return append(key[:bucketKeySize], attributeKey...)
}
-// returns from attributeBucketName result, nil otherwise.
-func cidFromAttributeBucket(val []byte, attributeKey string) []byte {
- if len(val) < bucketKeySize || val[0] != userAttributePrefix || !bytes.Equal(val[bucketKeySize:], []byte(attributeKey)) {
- return nil
+func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return cid.ID{}, false
}
-
- return val[1:bucketKeySize]
+ var result cid.ID
+ return result, result.Decode(bucketName[1:bucketKeySize]) == nil
}
-// payloadHashBucketName returns _payloadhash.
-func payloadHashBucketName(cnr cid.ID, key []byte) []byte {
- return bucketName(cnr, payloadHashPrefix, key)
+func attributeFromAttributeBucket(bucketName []byte) (string, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return "", false
+ }
+ return string(bucketName[bucketKeySize:]), true
}
// rootBucketName returns _root.
@@ -174,11 +197,6 @@ func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
}
-// ownerBucketName returns _ownerid.
-func ownerBucketName(cnr cid.ID, key []byte) []byte {
- return bucketName(cnr, ownerPrefix, key)
-}
-
// parentBucketName returns _parent.
func parentBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, parentPrefix, key)
@@ -189,6 +207,40 @@ func splitBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, splitPrefix, key)
}
+// ecInfoBucketName returns _ecinfo.
+func ecInfoBucketName(cnr cid.ID, key []byte) []byte {
+ return bucketName(cnr, ecInfoPrefix, key)
+}
+
+// objectToExpirationEpochBucketName returns objectToExpirationEpochPrefix_.
+func objectToExpirationEpochBucketName(cnr cid.ID, key []byte) []byte {
+ return bucketName(cnr, objectToExpirationEpochPrefix, key)
+}
+
+func expirationEpochKey(epoch uint64, cnr cid.ID, obj oid.ID) []byte {
+ result := make([]byte, epochSize+addressKeySize)
+ binary.BigEndian.PutUint64(result, epoch)
+ cnr.Encode(result[epochSize:])
+ obj.Encode(result[epochSize+cidSize:])
+ return result
+}
+
+func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
+ if len(key) != epochSize+addressKeySize {
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("unexpected expiration epoch to object key length: %d", len(key))
+ }
+ epoch := binary.BigEndian.Uint64(key)
+ var cnr cid.ID
+ if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
+ }
+ var obj oid.ID
+ if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
+ }
+ return epoch, cnr, obj, nil
+}
+
// addressKey returns key for K-V tables when key is a whole address.
func addressKey(addr oid.Address, key []byte) []byte {
addr.Container().Encode(key)
@@ -199,7 +251,7 @@ func addressKey(addr oid.Address, key []byte) []byte {
// parses object address formed by addressKey.
func decodeAddressFromKey(dst *oid.Address, k []byte) error {
if len(k) != addressKeySize {
- return fmt.Errorf("invalid length")
+ return errInvalidLength
}
var cnr cid.ID
@@ -226,20 +278,17 @@ func objectKey(obj oid.ID, key []byte) []byte {
// if meets irregular object container in objs - returns its type, otherwise returns object.TypeRegular.
//
// firstIrregularObjectType(tx, cnr, obj) usage allows getting object type.
-func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) object.Type {
- if len(objs) == 0 {
- panic("empty object list in firstIrregularObjectType")
- }
+func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type {
+ assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType")
- var keys [3][1 + cidSize]byte
+ var keys [2][1 + cidSize]byte
irregularTypeBuckets := [...]struct {
- typ object.Type
+ typ objectSDK.Type
name []byte
}{
- {object.TypeTombstone, tombstoneBucketName(idCnr, keys[0][:])},
- {object.TypeStorageGroup, storageGroupBucketName(idCnr, keys[1][:])},
- {object.TypeLock, bucketNameLockers(idCnr, keys[2][:])},
+ {objectSDK.TypeTombstone, tombstoneBucketName(idCnr, keys[0][:])},
+ {objectSDK.TypeLock, bucketNameLockers(idCnr, keys[1][:])},
}
for i := range objs {
@@ -250,7 +299,7 @@ func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) object
}
}
- return object.TypeRegular
+ return objectSDK.TypeRegular
}
// return true if provided object is of LOCK type.
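
The two new key helpers round-trip a composite key whose epoch comes first in big-endian form, so a bbolt cursor naturally walks records in epoch order. A quick, self-contained illustration — cidtest is already used in this patch, and oidtest is assumed to be the SDK's object/id/test package:

```go
epoch := uint64(1_000)
cnr := cidtest.ID() // random container ID
obj := oidtest.ID() // random object ID

key := expirationEpochKey(epoch, cnr, obj) // 8-byte epoch + 64-byte address
gotEpoch, gotCnr, gotObj, err := parseExpirationEpochKey(key)
// err == nil; gotEpoch == epoch, gotCnr == cnr, gotObj == obj.
// Big-endian epoch bytes make keys sort by epoch under bbolt's
// lexicographic cursor order.
```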
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index 5748b64ee9..fbc0f1ad9d 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -2,6 +2,7 @@ package meta
import (
"encoding/binary"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -9,15 +10,22 @@ import (
)
// version contains current metabase version.
-const version = 2
+const version = 3
-var versionKey = []byte("version")
+var (
+ versionKey = []byte("version")
+ upgradeKey = []byte("upgrade")
+)
// ErrOutdatedVersion is returned on initializing
// an existing metabase that is not compatible with
// the current code version.
var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required")
+var ErrIncompletedUpgrade = logicerr.New("metabase upgrade is not completed")
+
+var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket")
+
func checkVersion(tx *bbolt.Tx, initialized bool) error {
var knownVersion bool
@@ -32,6 +40,10 @@ func checkVersion(tx *bbolt.Tx, initialized bool) error {
return fmt.Errorf("%w: expected=%d, stored=%d", ErrOutdatedVersion, version, stored)
}
}
+ data = b.Get(upgradeKey)
+ if len(data) > 0 {
+ return ErrIncompletedUpgrade
+ }
}
if !initialized {
@@ -55,7 +67,19 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
- return fmt.Errorf("can't create auxiliary bucket: %w", err)
+ return fmt.Errorf("create auxiliary bucket: %w", err)
}
return b.Put(versionKey, data)
}
+
+func currentVersion(tx *bbolt.Tx) (uint64, error) {
+ b := tx.Bucket(shardInfoBucket)
+ if b == nil {
+ return 0, errVersionUndefinedNoInfoBucket
+ }
+ data := b.Get(versionKey)
+ if len(data) != 8 {
+ return 0, fmt.Errorf("version undefined: invalid version data length %d", len(data))
+ }
+ return binary.LittleEndian.Uint64(data), nil
+}
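
For reference, the version record is a plain 8-byte little-endian value under the "version" key of the shard-info bucket. A minimal read-only check, assuming an open *bbolt.DB named db and the package-level names used above:

```go
// Mirrors currentVersion, but tolerates a missing or malformed record.
_ = db.View(func(tx *bbolt.Tx) error {
	if b := tx.Bucket(shardInfoBucket); b != nil {
		if data := b.Get(versionKey); len(data) == 8 {
			fmt.Println("metabase version:", binary.LittleEndian.Uint64(data))
		}
	}
	return nil
})
```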
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index 70ded67a4f..b373fb32ed 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -1,12 +1,14 @@
package meta
import (
+ "context"
"encoding/binary"
"errors"
"fmt"
"path/filepath"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@@ -22,7 +24,7 @@ func TestVersion(t *testing.T) {
newDB := func(t *testing.T) *DB {
return New(WithPath(filepath.Join(dir, t.Name())),
- WithPermissions(0600), WithEpochState(epochStateImpl{}))
+ WithPermissions(0o600), WithEpochState(epochStateImpl{}))
}
check := func(t *testing.T, db *DB) {
require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
@@ -42,46 +44,64 @@ func TestVersion(t *testing.T) {
}
t.Run("simple", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.Open(false))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
t.Run("reopen", func(t *testing.T) {
- require.NoError(t, db.Open(false))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
t.Run("old data", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.Open(false))
- require.NoError(t, db.WriteShardID([]byte{1, 2, 3, 4}))
- require.NoError(t, db.Close())
+ require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite))
- require.NoError(t, db.Open(false))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
t.Run("invalid version", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.Open(false))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return updateVersion(tx, version+1)
}))
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
- require.NoError(t, db.Open(false))
- require.Error(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.Error(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
t.Run("reset", func(t *testing.T) {
- require.NoError(t, db.Open(false))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Reset())
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
+ t.Run("incompleted upgrade", func(t *testing.T) {
+ db := newDB(t)
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
+ }))
+ require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close(context.Background()))
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
+ }))
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
+ })
}
diff --git a/pkg/local_object_storage/metrics/blobovnicza.go b/pkg/local_object_storage/metrics/blobovnicza.go
new file mode 100644
index 0000000000..460d6e638a
--- /dev/null
+++ b/pkg/local_object_storage/metrics/blobovnicza.go
@@ -0,0 +1,123 @@
+package metrics
+
+import (
+ "time"
+
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+func NewBlobovniczaTreeMetrics(path string, m metrics_impl.BlobobvnizcaMetrics) blobovniczatree.Metrics {
+ return &blobovniczaTreeMetrics{
+ path: path,
+ shardID: undefined,
+ m: m,
+ }
+}
+
+type blobovniczaTreeMetrics struct {
+ shardID string
+ path string
+ m metrics_impl.BlobobvnizcaMetrics
+}
+
+func (m *blobovniczaTreeMetrics) Blobovnicza() blobovnicza.Metrics {
+ return &blobovniczaMetrics{
+ shardID: func() string { return m.shardID },
+ path: m.path,
+ m: m.m,
+ }
+}
+
+func (m *blobovniczaTreeMetrics) SetParentID(parentID string) {
+ m.shardID = parentID
+}
+
+func (m *blobovniczaTreeMetrics) SetMode(mod mode.ComponentMode) {
+ m.m.SetBlobobvnizcaTreeMode(m.shardID, m.path, mod)
+}
+
+func (m *blobovniczaTreeMetrics) Close() {
+ m.m.CloseBlobobvnizcaTree(m.shardID, m.path)
+}
+
+func (m *blobovniczaTreeMetrics) SetRebuildStatus(status string) {
+ m.m.BlobovniczaTreeRebuildStatus(m.shardID, m.path, status)
+}
+
+func (m *blobovniczaTreeMetrics) SetRebuildPercent(value uint32) {
+ m.m.BlobovniczaTreeRebuildPercent(m.shardID, m.path, value)
+}
+
+func (m *blobovniczaTreeMetrics) ObjectMoved(d time.Duration) {
+ m.m.BlobovniczaTreeObjectMoved(m.shardID, m.path, d)
+}
+
+func (m *blobovniczaTreeMetrics) Delete(d time.Duration, success, withStorageID bool) {
+ m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Delete", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+}
+
+func (m *blobovniczaTreeMetrics) Exists(d time.Duration, success, withStorageID bool) {
+ m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Exists", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+}
+
+func (m *blobovniczaTreeMetrics) GetRange(d time.Duration, size int, success, withStorageID bool) {
+ m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "GetRange", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+ if success {
+ m.m.AddBlobobvnizcaTreeGet(m.shardID, m.path, size)
+ }
+}
+
+func (m *blobovniczaTreeMetrics) Get(d time.Duration, size int, success, withStorageID bool) {
+ m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Get", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+ if success {
+ m.m.AddBlobobvnizcaTreeGet(m.shardID, m.path, size)
+ }
+}
+
+func (m *blobovniczaTreeMetrics) Iterate(d time.Duration, success bool) {
+ m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Iterate", d, success, metrics_impl.NullBool{})
+}
+
+func (m *blobovniczaTreeMetrics) Put(d time.Duration, size int, success bool) {
+ m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Put", d, success, metrics_impl.NullBool{})
+ if success {
+ m.m.AddBlobobvnizcaTreePut(m.shardID, m.path, size)
+ }
+}
+
+func (m *blobovniczaTreeMetrics) ObjectsCount(d time.Duration, success bool) {
+ m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "ObjectsCount", d, success, metrics_impl.NullBool{})
+}
+
+type blobovniczaMetrics struct {
+ m metrics_impl.BlobobvnizcaMetrics
+ shardID func() string
+ path string
+}
+
+func (m *blobovniczaMetrics) AddOpenBlobovniczaSize(size uint64) {
+ m.m.AddOpenBlobovniczaSize(m.shardID(), m.path, size)
+}
+
+func (m *blobovniczaMetrics) SubOpenBlobovniczaSize(size uint64) {
+ m.m.SubOpenBlobovniczaSize(m.shardID(), m.path, size)
+}
+
+func (m *blobovniczaMetrics) IncOpenBlobovniczaCount() {
+ m.m.IncOpenBlobovniczaCount(m.shardID(), m.path)
+}
+
+func (m *blobovniczaMetrics) DecOpenBlobovniczaCount() {
+ m.m.DecOpenBlobovniczaCount(m.shardID(), m.path)
+}
+
+func (m *blobovniczaMetrics) AddOpenBlobovniczaItems(items uint64) {
+ m.m.AddOpenBlobovniczaItems(m.shardID(), m.path, items)
+}
+
+func (m *blobovniczaMetrics) SubOpenBlobovniczaItems(items uint64) {
+ m.m.SubOpenBlobovniczaItems(m.shardID(), m.path, items)
+}
diff --git a/pkg/local_object_storage/metrics/blobstore.go b/pkg/local_object_storage/metrics/blobstore.go
new file mode 100644
index 0000000000..9a41f01d07
--- /dev/null
+++ b/pkg/local_object_storage/metrics/blobstore.go
@@ -0,0 +1,69 @@
+package metrics
+
+import (
+ "time"
+
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+)
+
+type blobstoreMetrics struct {
+ shardID string
+ m metrics_impl.BlobstoreMetrics
+}
+
+func NewBlobstoreMetrics(m metrics_impl.BlobstoreMetrics) blobstor.Metrics {
+ return &blobstoreMetrics{
+ shardID: undefined,
+ m: m,
+ }
+}
+
+func (m *blobstoreMetrics) SetParentID(parentID string) {
+ m.shardID = parentID
+}
+
+func (m *blobstoreMetrics) SetMode(readOnly bool) {
+ m.m.SetMode(m.shardID, readOnly)
+}
+
+func (m *blobstoreMetrics) Close() {
+ m.m.Close(m.shardID)
+}
+
+func (m *blobstoreMetrics) Delete(d time.Duration, success, withStorageID bool) {
+ m.m.MethodDuration(m.shardID, "Delete", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true})
+}
+
+func (m *blobstoreMetrics) Exists(d time.Duration, success, withStorageID bool) {
+ m.m.MethodDuration(m.shardID, "Exists", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true})
+}
+
+func (m *blobstoreMetrics) GetRange(d time.Duration, size int, success, withStorageID bool) {
+ m.m.MethodDuration(m.shardID, "GetRange", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true})
+ if success {
+ m.m.AddGet(m.shardID, size)
+ }
+}
+
+func (m *blobstoreMetrics) Get(d time.Duration, size int, success, withStorageID bool) {
+ m.m.MethodDuration(m.shardID, "Get", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true})
+ if success {
+ m.m.AddGet(m.shardID, size)
+ }
+}
+
+func (m *blobstoreMetrics) Iterate(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, "Iterate", d, success, metrics_impl.NullBool{})
+}
+
+func (m *blobstoreMetrics) Put(d time.Duration, size int, success bool) {
+ m.m.MethodDuration(m.shardID, "Put", d, success, metrics_impl.NullBool{})
+ if success {
+ m.m.AddPut(m.shardID, size)
+ }
+}
+
+func (m *blobstoreMetrics) ObjectsCount(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, "ObjectsCount", d, success, metrics_impl.NullBool{})
+}
diff --git a/pkg/local_object_storage/metrics/consts.go b/pkg/local_object_storage/metrics/consts.go
new file mode 100644
index 0000000000..5199307100
--- /dev/null
+++ b/pkg/local_object_storage/metrics/consts.go
@@ -0,0 +1,3 @@
+package metrics
+
+const undefined = "undefined"
diff --git a/pkg/local_object_storage/metrics/fstree.go b/pkg/local_object_storage/metrics/fstree.go
new file mode 100644
index 0000000000..d93363fa3b
--- /dev/null
+++ b/pkg/local_object_storage/metrics/fstree.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+ "time"
+
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+func NewFSTreeMetricsWithoutShardID(path string, m metrics_impl.FSTreeMetrics) fstree.Metrics {
+ return &fstreeMetrics{
+ shardID: undefined,
+ path: path,
+ m: m,
+ }
+}
+
+type fstreeMetrics struct {
+ shardID string
+ path string
+ m metrics_impl.FSTreeMetrics
+}
+
+func (m *fstreeMetrics) SetParentID(parentID string) {
+ m.shardID = parentID
+}
+
+func (m *fstreeMetrics) SetMode(mod mode.ComponentMode) {
+ m.m.SetMode(m.shardID, m.path, mod)
+}
+
+func (m *fstreeMetrics) Close() {
+ m.m.Close(m.shardID, m.path)
+}
+
+func (m *fstreeMetrics) Iterate(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "Iterate", d, success)
+}
+
+func (m *fstreeMetrics) IterateInfo(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "IterateInfo", d, success)
+}
+
+func (m *fstreeMetrics) Delete(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "Delete", d, success)
+}
+
+func (m *fstreeMetrics) Exists(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "Exists", d, success)
+}
+
+func (m *fstreeMetrics) Put(d time.Duration, size int, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "Put", d, success)
+ if success {
+ m.m.AddPut(m.shardID, m.path, size)
+ }
+}
+
+func (m *fstreeMetrics) Get(d time.Duration, size int, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "Get", d, success)
+ if success {
+ m.m.AddGet(m.shardID, m.path, size)
+ }
+}
+
+func (m *fstreeMetrics) GetRange(d time.Duration, size int, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "GetRange", d, success)
+ if success {
+ m.m.AddGet(m.shardID, m.path, size)
+ }
+}
+
+func (m *fstreeMetrics) ObjectsCount(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "ObjectsCount", d, success)
+}
diff --git a/pkg/local_object_storage/metrics/metabase.go b/pkg/local_object_storage/metrics/metabase.go
new file mode 100644
index 0000000000..e962e37cbb
--- /dev/null
+++ b/pkg/local_object_storage/metrics/metabase.go
@@ -0,0 +1,39 @@
+package metrics
+
+import (
+ "time"
+
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+func NewMetabaseMetrics(path string, m metrics_impl.MetabaseMetrics) meta.Metrics {
+ return &metabaseMetrics{
+ shardID: undefined,
+ path: path,
+ m: m,
+ }
+}
+
+type metabaseMetrics struct {
+ shardID string
+ path string
+ m metrics_impl.MetabaseMetrics
+}
+
+func (m *metabaseMetrics) SetParentID(parentID string) {
+ m.shardID = parentID
+}
+
+func (m *metabaseMetrics) SetMode(mode mode.ComponentMode) {
+ m.m.SetMode(m.shardID, m.path, mode.String())
+}
+
+func (m *metabaseMetrics) Close() {
+ m.m.Close(m.shardID, m.path)
+}
+
+func (m *metabaseMetrics) AddMethodDuration(method string, d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, method, d, success)
+}
diff --git a/pkg/local_object_storage/metrics/pilorama.go b/pkg/local_object_storage/metrics/pilorama.go
new file mode 100644
index 0000000000..050b769a04
--- /dev/null
+++ b/pkg/local_object_storage/metrics/pilorama.go
@@ -0,0 +1,37 @@
+package metrics
+
+import (
+ "time"
+
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+func NewPiloramaMetrics(m metrics_impl.PiloramaMetrics) pilorama.Metrics {
+ return &piloramaMetrics{
+ shardID: undefined,
+ m: m,
+ }
+}
+
+type piloramaMetrics struct {
+ shardID string
+ m metrics_impl.PiloramaMetrics
+}
+
+func (m *piloramaMetrics) SetParentID(id string) {
+ m.shardID = id
+}
+
+func (m *piloramaMetrics) SetMode(mod mode.ComponentMode) {
+ m.m.SetMode(m.shardID, mod)
+}
+
+func (m *piloramaMetrics) Close() {
+ m.m.Close(m.shardID)
+}
+
+func (m *piloramaMetrics) AddMethodDuration(method string, d time.Duration, success bool) {
+ m.m.AddMethodDuration(m.shardID, method, d, success)
+}
diff --git a/pkg/local_object_storage/pilorama/batch.go b/pkg/local_object_storage/pilorama/batch.go
index 3065c8370b..4c5238921b 100644
--- a/pkg/local_object_storage/pilorama/batch.go
+++ b/pkg/local_object_storage/pilorama/batch.go
@@ -1,7 +1,9 @@
package pilorama
import (
- "sort"
+ "cmp"
+ "encoding/binary"
+ "slices"
"sync"
"time"
@@ -46,14 +48,71 @@ func (b *batch) run() {
// Sorting without a mutex is ok, because we append to this slice only if timer is non-nil.
// See (*boltForest).addBatch for details.
- sort.Slice(b.operations, func(i, j int) bool {
- return b.operations[i].Time < b.operations[j].Time
+ slices.SortFunc(b.operations, func(mi, mj *Move) int {
+ return cmp.Compare(mi.Time, mj.Time)
})
+ b.operations = slices.CompactFunc(b.operations, func(x, y *Move) bool { return x.Time == y.Time })
- var lm Move
- return b.forest.applyOperation(bLog, bTree, b.operations, &lm)
+ // Our main use-case is addition of new items. In this case,
+ // we do not need to perform undo()/redo(), just do().
+ // https://github.com/trvedata/move-op/blob/6c23447c12a7862ff31b7fc2205f6c90fbdb9dc0/proof/Move_Create.thy#L259
+ //
+ // For this optimization to work we need to ensure three things:
+ // 1. The node itself is not yet in tree.
+ // 2. The node is not a parent. This case is not mentioned in the article, because
+ // they consider a "static order" (perform all CREATE operations before MOVE).
+ // We need this because if node _is_ a parent, we could violate (3) for some late operation.
+ // See TestForest_ApplySameOperation for details.
+ // 3. Parent of each operation is already in tree.
+ var parents map[uint64]struct{}
+ var cKey [maxKeySize]byte
+ var slow bool
+ for i := range b.operations {
+ _, _, _, inTree := b.forest.getState(bTree, stateKey(cKey[:], b.operations[i].Child))
+ if inTree {
+ slow = true
+ break
+ }
+
+ key := childrenKey(cKey[:], b.operations[i].Child, 0)
+ k, _ := bTree.Cursor().Seek(key)
+ if len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == b.operations[i].Child {
+ slow = true
+ break
+ }
+
+ if b.operations[i].Parent == RootID {
+ continue
+ } else if parents == nil {
+ // Attaching key only to root is done frequently,
+ // no allocations are performed unless necessary.
+ parents = make(map[uint64]struct{})
+ } else if _, ok := parents[b.operations[i].Parent]; ok {
+ continue
+ }
+
+ p := b.operations[i].Parent
+ _, ts, _, inTree := b.forest.getState(bTree, stateKey(cKey[:], p))
+ if !inTree || b.operations[0].Time < ts {
+ slow = true
+ break
+ }
+ parents[b.operations[i].Parent] = struct{}{}
+ }
+
+ if slow {
+ var lm Move
+ return b.forest.applyOperation(bLog, bTree, b.operations, &lm)
+ }
+
+ for i := range b.operations {
+ if err := b.forest.do(bLog, bTree, cKey[:], b.operations[i]); err != nil {
+ return err
+ }
+ }
+ return nil
})
- for i := range b.operations {
+ for i := range b.results {
b.results[i] <- err
}
}
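
The slices.SortFunc/CompactFunc pair above orders the batch by logical time and drops operations whose timestamps collide exactly, which is what makes the subsequent fast-path checks sound. A minimal illustration, assuming Move embeds Meta as the field access b.operations[i].Time implies:

```go
ops := []*Move{
	{Meta: Meta{Time: 3}},
	{Meta: Meta{Time: 1}},
	{Meta: Meta{Time: 3}}, // exact duplicate timestamp
}
slices.SortFunc(ops, func(a, b *Move) int { return cmp.Compare(a.Time, b.Time) })
ops = slices.CompactFunc(ops, func(x, y *Move) bool { return x.Time == y.Time })
// ops now holds two moves: Time 1 and Time 3.
```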
diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go
new file mode 100644
index 0000000000..3156751f2c
--- /dev/null
+++ b/pkg/local_object_storage/pilorama/bench_test.go
@@ -0,0 +1,57 @@
+package pilorama
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func getTimestamp(reorder int, ts Timestamp) Timestamp {
+ base := ts / Timestamp(reorder)
+ rem := ts % Timestamp(reorder)
+ return base*Timestamp(reorder) + Timestamp(reorder) - rem
+}
+
+func BenchmarkCreate(b *testing.B) {
+ // Use `os.TempDir` because the forest may be constructed multiple times in the same test.
+ tmpDir, err := os.MkdirTemp(os.TempDir(), "*")
+ require.NoError(b, err)
+
+ f := NewBoltForest(
+ WithPath(filepath.Join(tmpDir, "test.db")),
+ WithMaxBatchSize(runtime.GOMAXPROCS(0)))
+ require.NoError(b, f.Open(context.Background(), mode.ReadWrite))
+ require.NoError(b, f.Init(context.Background()))
+ defer func() { require.NoError(b, f.Close(context.Background())) }()
+
+ b.Cleanup(func() {
+ require.NoError(b, os.RemoveAll(tmpDir))
+ })
+
+ cid := cidtest.ID()
+ treeID := "tree"
+ ctx := context.Background()
+ var index atomic.Int32
+ index.Store(-1)
+ b.SetParallelism(2)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ i := index.Add(1)
+ op := &Move{
+ Meta: Meta{Time: getTimestamp(runtime.GOMAXPROCS(0)*2, Timestamp(i+1))},
+ Child: Node(i + 1),
+ Parent: RootID,
+ }
+ if err := f.TreeApply(ctx, cid, treeID, op, true); err != nil {
+ b.FailNow()
+ }
+ }
+ })
+}
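What getTimestamp produces, as a standalone check (the function body is copied
from the benchmark above): within each window of `reorder` inputs the outputs
run backwards, presumably so that the parallel TreeApply calls in
BenchmarkCreate apply deliberately out-of-order timestamps.

    package main

    import "fmt"

    type Timestamp = uint64

    func getTimestamp(reorder int, ts Timestamp) Timestamp {
        base := ts / Timestamp(reorder)
        rem := ts % Timestamp(reorder)
        return base*Timestamp(reorder) + Timestamp(reorder) - rem
    }

    func main() {
        for ts := Timestamp(1); ts <= 8; ts++ {
            fmt.Print(getTimestamp(4, ts), " ")
        }
        // Output: 3 2 1 8 7 6 5 12
    }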
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index b47fa16e83..897b37ea0e 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -2,21 +2,28 @@ package pilorama
import (
"bytes"
+ "context"
"encoding/binary"
"errors"
"fmt"
"math/rand"
"os"
"path/filepath"
+ "slices"
+ "strconv"
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type boltForest struct {
@@ -32,6 +39,11 @@ type boltForest struct {
cfg
}
+const (
+ childrenKeySize = 17
+ maxKeySize = childrenKeySize
+)
+
var (
dataBucket = []byte{0}
logBucket = []byte{1}
@@ -66,7 +78,10 @@ func NewBoltForest(opts ...Option) ForestStorage {
perm: os.ModePerm,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
maxBatchSize: bbolt.DefaultMaxBatchSize,
+ openFile: os.OpenFile,
+ metrics: &noopMetrics{},
},
+ mode: mode.Disabled,
}
for i := range opts {
@@ -76,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage {
return &b
}
-func (t *boltForest) SetMode(m mode.Mode) error {
+func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
t.modeMtx.Lock()
defer t.modeMtx.Unlock()
@@ -84,41 +99,56 @@ func (t *boltForest) SetMode(m mode.Mode) error {
return nil
}
- err := t.Close()
+ err := t.Close(ctx)
if err == nil && !m.NoMetabase() {
- if err = t.Open(m.ReadOnly()); err == nil {
- err = t.Init()
+ if err = t.openBolt(m); err == nil {
+ err = t.Init(ctx)
}
}
if err != nil {
- return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+ return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
}
t.mode = m
+ t.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
return nil
}
-func (t *boltForest) Open(readOnly bool) error {
+
+func (t *boltForest) Open(_ context.Context, mode mode.Mode) error {
+ t.modeMtx.Lock()
+ defer t.modeMtx.Unlock()
+ t.mode = mode
+ if mode.NoMetabase() {
+ return nil
+ }
+ return t.openBolt(mode)
+}
+
+func (t *boltForest) openBolt(m mode.Mode) error {
+ readOnly := m.ReadOnly()
err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
if err != nil {
- return fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err)
+ return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
}
opts := *bbolt.DefaultOptions
opts.ReadOnly = readOnly
opts.NoSync = t.noSync
opts.Timeout = 100 * time.Millisecond
+ opts.OpenFile = t.openFile
t.db, err = bbolt.Open(t.path, t.perm, &opts)
if err != nil {
- return fmt.Errorf("can't open the pilorama DB: %w", err)
+ return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
}
t.db.MaxBatchSize = t.maxBatchSize
t.db.MaxBatchDelay = t.maxBatchDelay
-
+ t.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
return nil
}
-func (t *boltForest) Init() error {
+
+func (t *boltForest) Init(context.Context) error {
if t.mode.NoMetabase() || t.db.IsReadOnly() {
return nil
}
@@ -128,21 +158,37 @@ func (t *boltForest) Init() error {
return err
}
_, err = tx.CreateBucketIfNotExists(logBucket)
- if err != nil {
- return err
- }
- return nil
+ return err
})
}
-func (t *boltForest) Close() error {
+
+func (t *boltForest) Close(context.Context) error {
+ var err error
if t.db != nil {
- return t.db.Close()
+ err = t.db.Close()
}
- return nil
+ if err == nil {
+ t.metrics.Close()
+ }
+ return err
+}
+
+func (t *boltForest) SetParentID(id string) {
+ t.metrics.SetParentID(id)
}
// TreeMove implements the Forest interface.
-func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error) {
+func (t *boltForest) TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeMove",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -158,7 +204,7 @@ func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, e
lm := *m
fullID := bucketName(d.CID, treeID)
- return &lm, t.db.Batch(func(tx *bbolt.Tx) error {
+ return &lm, metaerr.Wrap(t.db.Batch(func(tx *bbolt.Tx) error {
bLog, bTree, err := t.getTreeBuckets(tx, fullID)
if err != nil {
return err
@@ -168,12 +214,62 @@ func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, e
if lm.Child == RootID {
lm.Child = t.findSpareID(bTree)
}
- return t.do(bLog, bTree, make([]byte, 17), &lm)
+ return t.do(bLog, bTree, make([]byte, maxKeySize), &lm)
+ }))
+}
+
+func (t *boltForest) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ t.modeMtx.RLock()
+ defer t.modeMtx.RUnlock()
+
+ if t.mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ var height uint64
+ var retErr error
+ err := t.db.View(func(tx *bbolt.Tx) error {
+ treeRoot := tx.Bucket(bucketName(cid, treeID))
+ if treeRoot != nil {
+ k, _ := treeRoot.Bucket(logBucket).Cursor().Last()
+ height = binary.BigEndian.Uint64(k)
+ } else {
+ retErr = ErrTreeNotFound
+ }
+ return nil
})
+ if err == nil {
+ err = retErr
+ }
+ return height, metaerr.Wrap(err)
}
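TreeHeight above works because log keys are operation times encoded big-endian,
so bbolt's lexicographic key order coincides with numeric order and
Cursor().Last() lands on the maximum height. A two-line sketch of the encoding
(the value 42 is arbitrary):

    var logKey [8]byte
    binary.BigEndian.PutUint64(logKey[:], 42)
    // logKey == [0 0 0 0 0 0 0 42]; larger heights sort strictly later.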
// TreeExists implements the Forest interface.
-func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
+func (t *boltForest) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeExists", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeExists",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -188,19 +284,36 @@ func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
exists = treeRoot != nil
return nil
})
-
- return exists, err
+ success = err == nil
+ return exists, metaerr.Wrap(err)
}
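TreeExists shows the instrumentation frame that every exported method below
repeats; condensed once here as a sketch (treeX and doWork are placeholders,
the other identifiers are the ones used in this file):

    func (t *boltForest) treeX(ctx context.Context) error {
        startedAt := time.Now()
        success := false
        defer func() { t.metrics.AddMethodDuration("TreeX", time.Since(startedAt), success) }()

        _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeX")
        defer span.End()

        err := doWork() // placeholder for the actual bbolt transaction
        success = err == nil
        return metaerr.Wrap(err)
    }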
var syncHeightKey = []byte{'h'}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+func (t *boltForest) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeUpdateLastSyncHeight", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeUpdateLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", strconv.FormatUint(height, 10)),
+ ),
+ )
+ defer span.End()
+
rawHeight := make([]byte, 8)
binary.LittleEndian.PutUint64(rawHeight, height)
buck := bucketName(cid, treeID)
- return t.db.Batch(func(tx *bbolt.Tx) error {
+ err := metaerr.Wrap(t.db.Batch(func(tx *bbolt.Tx) error {
treeRoot := tx.Bucket(buck)
if treeRoot == nil {
return ErrTreeNotFound
@@ -208,11 +321,29 @@ func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, heig
b := treeRoot.Bucket(dataBucket)
return b.Put(syncHeightKey, rawHeight)
- })
+ }))
+ success = err == nil
+ return err
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (t *boltForest) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeLastSyncHeight", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
var height uint64
buck := bucketName(cid, treeID)
@@ -229,11 +360,39 @@ func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, e
}
return nil
})
- return height, err
+ success = err == nil
+ return height, metaerr.Wrap(err)
}
// TreeAddByPath implements the Forest interface.
-func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) {
+func (t *boltForest) TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeAddByPath", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeAddByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Int("meta_count", len(meta)),
+ ),
+ )
+ defer span.End()
+
+ res, err := t.addByPathInternal(d, attr, treeID, path, meta)
+ success = err == nil
+ return res, err
+}
+
+func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID string, path []string, meta []KeyValue) ([]Move, error) {
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -251,7 +410,7 @@ func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string,
}
var lm []Move
- var key [17]byte
+ var key [maxKeySize]byte
fullID := bucketName(d.CID, treeID)
err := t.db.Batch(func(tx *bbolt.Tx) error {
@@ -260,10 +419,7 @@ func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string,
return err
}
- i, node, err := t.getPathPrefix(bTree, attr, path)
- if err != nil {
- return err
- }
+ i, node := t.getPathPrefix(bTree, attr, path)
ts := t.getLatestTimestamp(bLog, d.Position, d.Size)
lm = make([]Move, len(path)-i+1)
@@ -296,7 +452,7 @@ func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string,
}
return t.do(bLog, bTree, key[:], &lm[len(lm)-1])
})
- return lm, err
+ return lm, metaerr.Wrap(err)
}
// getLatestTimestamp returns timestamp for a new operation which is guaranteed to be bigger than
@@ -327,10 +483,23 @@ func (t *boltForest) findSpareID(bTree *bbolt.Bucket) uint64 {
}
// TreeApply implements the Forest interface.
-func (t *boltForest) TreeApply(d CIDDescriptor, treeID string, m *Move, backgroundSync bool) error {
- if !d.checkValid() {
- return ErrInvalidCIDDescriptor
- }
+func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeApply", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApply",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.Bool("background", backgroundSync),
+ ),
+ )
+ defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -344,8 +513,9 @@ func (t *boltForest) TreeApply(d CIDDescriptor, treeID string, m *Move, backgrou
if backgroundSync {
var seen bool
err := t.db.View(func(tx *bbolt.Tx) error {
- treeRoot := tx.Bucket(bucketName(d.CID, treeID))
+ treeRoot := tx.Bucket(bucketName(cnr, treeID))
if treeRoot == nil {
+ success = true
return nil
}
@@ -354,16 +524,18 @@ func (t *boltForest) TreeApply(d CIDDescriptor, treeID string, m *Move, backgrou
var logKey [8]byte
binary.BigEndian.PutUint64(logKey[:], m.Time)
seen = b.Get(logKey[:]) != nil
+ success = true
return nil
})
if err != nil || seen {
- return err
+ success = err == nil
+ return metaerr.Wrap(err)
}
}
if t.db.MaxBatchSize == 1 {
- fullID := bucketName(d.CID, treeID)
- return t.db.Update(func(tx *bbolt.Tx) error {
+ fullID := bucketName(cnr, treeID)
+ err := metaerr.Wrap(t.db.Update(func(tx *bbolt.Tx) error {
bLog, bTree, err := t.getTreeBuckets(tx, fullID)
if err != nil {
return err
@@ -371,15 +543,145 @@ func (t *boltForest) TreeApply(d CIDDescriptor, treeID string, m *Move, backgrou
var lm Move
return t.applyOperation(bLog, bTree, []*Move{m}, &lm)
- })
+ }))
+ success = err == nil
+ return err
}
ch := make(chan error, 1)
- t.addBatch(d, treeID, m, ch)
- return <-ch
+ t.addBatch(cnr, treeID, m, ch)
+ err := <-ch
+ success = err == nil
+ return metaerr.Wrap(err)
}
-func (t *boltForest) addBatch(d CIDDescriptor, treeID string, m *Move, ch chan error) {
+func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ m, err := t.filterSeen(cnr, treeID, m)
+ if err != nil {
+ return err
+ }
+ if len(m) == 0 {
+ success = true
+ return nil
+ }
+
+ ch := make(chan error)
+ b := &batch{
+ forest: t,
+ cid: cnr,
+ treeID: treeID,
+ results: []chan<- error{ch},
+ operations: m,
+ }
+ go func() {
+ b.run()
+ }()
+ err = <-ch
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) {
+ t.modeMtx.RLock()
+ defer t.modeMtx.RUnlock()
+
+ if t.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ ops := make([]*Move, 0, len(m))
+ err := t.db.View(func(tx *bbolt.Tx) error {
+ treeRoot := tx.Bucket(bucketName(cnr, treeID))
+ if treeRoot == nil {
+ ops = m
+ return nil
+ }
+ b := treeRoot.Bucket(logBucket)
+ for _, op := range m {
+ var logKey [8]byte
+ binary.BigEndian.PutUint64(logKey[:], op.Time)
+ seen := b.Get(logKey[:]) != nil
+ if !seen {
+ ops = append(ops, op)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, metaerr.Wrap(err)
+ }
+ return ops, nil
+}
+
+// TreeApplyStream should be used with caution: this method holds the write transaction open, blocking other writers, until `source` is closed.
+func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeApplyStream", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyStream",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ t.modeMtx.RLock()
+ defer t.modeMtx.RUnlock()
+
+ if t.mode.NoMetabase() {
+ return ErrDegradedMode
+ } else if t.mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+
+ fullID := bucketName(cnr, treeID)
+ err := metaerr.Wrap(t.db.Update(func(tx *bbolt.Tx) error {
+ bLog, bTree, err := t.getTreeBuckets(tx, fullID)
+ if err != nil {
+ return err
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case m, ok := <-source:
+ if !ok {
+ return nil
+ }
+ var lm Move
+ if e := t.applyOperation(bLog, bTree, []*Move{m}, &lm); e != nil {
+ return e
+ }
+ }
+ }
+ }))
+ success = err == nil
+ return err
+}
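A hypothetical caller of TreeApplyStream (forest, ops, ctx, cnr and treeID are
assumed to exist). The entire stream is applied inside one bbolt write
transaction, so the channel must be closed for the transaction to end:

    source := make(chan *Move)
    go func() {
        defer close(source) // unblocks the Update callback and commits
        for _, m := range ops {
            source <- m
        }
    }()
    if err := forest.TreeApplyStream(ctx, cnr, treeID, source); err != nil {
        return err
    }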
+
+func (t *boltForest) addBatch(cnr cidSDK.ID, treeID string, m *Move, ch chan error) {
t.mtx.Lock()
for i := 0; i < len(t.batches); i++ {
t.batches[i].mtx.Lock()
@@ -391,7 +693,7 @@ func (t *boltForest) addBatch(d CIDDescriptor, treeID string, m *Move, ch chan e
continue
}
- found := t.batches[i].cid.Equals(d.CID) && t.batches[i].treeID == treeID
+ found := t.batches[i].cid.Equals(cnr) && t.batches[i].treeID == treeID
if found {
t.batches[i].results = append(t.batches[i].results, ch)
t.batches[i].operations = append(t.batches[i].operations, m)
@@ -412,7 +714,7 @@ func (t *boltForest) addBatch(d CIDDescriptor, treeID string, m *Move, ch chan e
}
b := &batch{
forest: t,
- cid: d.CID,
+ cid: cnr,
treeID: treeID,
results: []chan<- error{ch},
operations: []*Move{m},
@@ -448,7 +750,7 @@ func (t *boltForest) getTreeBuckets(tx *bbolt.Tx, treeRoot []byte) (*bbolt.Bucke
// applyOperations applies log operations. Assumes lm are sorted by timestamp.
func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*Move, lm *Move) error {
var tmp Move
- var cKey [17]byte
+ var cKey [maxKeySize]byte
c := logBucket.Cursor()
@@ -473,7 +775,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
key, value = c.Prev()
}
- for i := 0; i < len(ms); i++ {
+ for i := range ms {
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
// 2. Insert the operation.
@@ -629,12 +931,32 @@ func (t *boltForest) isAncestor(b *bbolt.Bucket, parent, child Node) bool {
}
// TreeGetByPath implements the Forest interface.
-func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
+func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeGetByPath", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Bool("latest", latest),
+ ),
+ )
+ defer span.End()
+
if !isAttributeInternal(attr) {
return nil, ErrNotPathAttribute
}
if len(path) == 0 {
+ success = true
return nil, nil
}
@@ -647,7 +969,7 @@ func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, pa
var nodes []Node
- return nodes, t.db.View(func(tx *bbolt.Tx) error {
+ err := metaerr.Wrap(t.db.View(func(tx *bbolt.Tx) error {
treeRoot := tx.Bucket(bucketName(cid, treeID))
if treeRoot == nil {
return ErrTreeNotFound
@@ -655,10 +977,7 @@ func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, pa
b := treeRoot.Bucket(dataBucket)
- i, curNode, err := t.getPathPrefix(b, attr, path[:len(path)-1])
- if err != nil {
- return err
- }
+ i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
if i < len(path)-1 {
return nil
}
@@ -667,28 +986,49 @@ func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, pa
c := b.Cursor()
- attrKey := internalKey(nil, attr, path[len(path)-1], curNode, 0)
- attrKey = attrKey[:len(attrKey)-8]
- childKey, _ := c.Seek(attrKey)
- for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) {
- child := binary.LittleEndian.Uint64(childKey[len(childKey)-8:])
- if latest {
- _, ts, _, _ := t.getState(b, stateKey(make([]byte, 9), child))
- if ts >= maxTimestamp {
- nodes = append(nodes[:0], child)
- maxTimestamp = ts
+ for i := range curNodes {
+ attrKey := internalKey(nil, attr, path[len(path)-1], curNodes[i], 0)
+ attrKey = attrKey[:len(attrKey)-8]
+ childKey, _ := c.Seek(attrKey)
+ for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) {
+ child := binary.LittleEndian.Uint64(childKey[len(childKey)-8:])
+ if latest {
+ _, ts, _, _ := t.getState(b, stateKey(make([]byte, 9), child))
+ if ts >= maxTimestamp {
+ nodes = append(nodes[:0], child)
+ maxTimestamp = ts
+ }
+ } else {
+ nodes = append(nodes, child)
}
- } else {
- nodes = append(nodes, child)
+ childKey, _ = c.Next()
}
- childKey, _ = c.Next()
}
return nil
- })
+ }))
+ success = err == nil
+ return nodes, err
}
// TreeGetMeta implements the forest interface.
-func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
+func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeGetMeta", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetMeta",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -714,24 +1054,61 @@ func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Met
_, _, meta, _ := t.getState(b, stateKey(key, nodeID))
return m.FromBytes(meta)
})
-
- return m, parentID, err
+ success = err == nil
+ return m, parentID, metaerr.Wrap(err)
}
-// TreeGetChildren implements the Forest interface.
-func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) {
+func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshold int) bool {
+ key := make([]byte, 9)
+ key[0] = 'c'
+
+ count := 0
+ for _, nodeID := range nodeIDs {
+ binary.LittleEndian.PutUint64(key[1:], nodeID)
+
+ c := b.Cursor()
+ for k, _ := c.Seek(key); len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == nodeID; k, _ = c.Next() {
+ if count++; count > threshold {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// TreeSortedByFilename implements the Forest interface.
+func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeSortedByFilename", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeSortedByFilename",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
if t.mode.NoMetabase() {
- return nil, ErrDegradedMode
+ return nil, last, ErrDegradedMode
+ }
+ if len(nodeIDs) == 0 {
+ return nil, last, errors.New("empty node list")
}
+ h := newHeap(last, count)
key := make([]byte, 9)
- key[0] = 'c'
- binary.LittleEndian.PutUint64(key[1:], nodeID)
- var children []uint64
+ var result []NodeInfo
+ var fewChildren bool
err := t.db.View(func(tx *bbolt.Tx) error {
treeRoot := tx.Bucket(bucketName(cid, treeID))
@@ -740,18 +1117,217 @@ func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node)
}
b := treeRoot.Bucket(dataBucket)
- c := b.Cursor()
- for k, _ := c.Seek(key); len(k) == 17 && binary.LittleEndian.Uint64(k[1:]) == nodeID; k, _ = c.Next() {
- children = append(children, binary.LittleEndian.Uint64(k[9:]))
+
+ // If the node is a leaf, we could end up scanning all filenames in the tree.
+ // To prevent this we first count the number of children: if it does not exceed
+ // the number of nodes we need to return, fall back to the TreeGetChildren() implementation.
+ if fewChildren = t.hasFewChildren(b, nodeIDs, count); fewChildren {
+ var err error
+ result, err = t.getChildren(b, nodeIDs)
+ return err
+ }
+
+ t.fillSortedChildren(b, nodeIDs, h)
+
+ for info, ok := h.pop(); ok; info, ok = h.pop() {
+ for _, id := range info.id {
+ childInfo, err := t.getChildInfo(b, key, id)
+ if err != nil {
+ return err
+ }
+ result = append(result, childInfo)
+ }
}
return nil
})
- return children, err
+ success = err == nil
+ if err != nil {
+ return nil, last, metaerr.Wrap(err)
+ }
+
+ if fewChildren {
+ result = sortAndCut(result, last)
+ }
+ res := mergeNodeInfos(result)
+ if len(res) > count {
+ res = res[:count]
+ }
+ if len(res) != 0 {
+ s := string(findAttr(res[len(res)-1].Meta, AttributeFilename))
+ last = NewCursor(s, res[len(res)-1].LastChild())
+ }
+ return res, last, metaerr.Wrap(err)
+}
+
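A hypothetical pagination loop over TreeSortedByFilename (process is an assumed
callback): feed the returned cursor back in until an empty page comes back.

    var cursor *Cursor
    for {
        page, next, err := forest.TreeSortedByFilename(ctx, cnr, treeID, MultiNode{RootID}, cursor, 100)
        if err != nil {
            return err
        }
        if len(page) == 0 {
            break
        }
        process(page)
        cursor = next
    }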
+func sortByFilename(nodes []NodeInfo) {
+ slices.SortFunc(nodes, func(a, b NodeInfo) int {
+ return bytes.Compare(a.Meta.GetAttr(AttributeFilename), b.Meta.GetAttr(AttributeFilename))
+ })
+}
+
+func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo {
+ var lastBytes []byte
+ if last != nil {
+ lastBytes = []byte(last.GetFilename())
+ }
+ sortByFilename(result)
+
+ for i := range result {
+ if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
+ return result[i:]
+ }
+ }
+ return nil
+}
+
+func (t *boltForest) getChildInfo(b *bbolt.Bucket, key []byte, childID Node) (NodeInfo, error) {
+ childInfo := NodeInfo{ID: childID}
+ parentID, _, metaBytes, found := t.getState(b, stateKey(key, childID))
+ if found {
+ childInfo.ParentID = parentID
+ if err := childInfo.Meta.FromBytes(metaBytes); err != nil {
+ return NodeInfo{}, err
+ }
+ }
+ return childInfo, nil
+}
+
+func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *fixedHeap) {
+ c := b.Cursor()
+ prefix := internalKeyPrefix(nil, AttributeFilename)
+
+ length := uint16(0)
+ count := 0
+
+ var nodes []uint64
+ var lastFilename *string
+ for k, _ := c.Seek(prefix); len(k) > 0 && k[0] == 'i'; k, _ = c.Next() {
+ if len(k) < len(prefix)+2+16 {
+ continue
+ }
+
+ parentID := binary.LittleEndian.Uint64(k[len(k)-16:])
+ if !slices.Contains(nodeIDs, parentID) {
+ continue
+ }
+
+ actualLength := binary.LittleEndian.Uint16(k[len(prefix):])
+ childID := binary.LittleEndian.Uint64(k[len(k)-8:])
+ filename := string(k[len(prefix)+2 : len(k)-16])
+
+ if lastFilename == nil {
+ lastFilename = &filename
+ nodes = append(nodes, childID)
+ } else if *lastFilename == filename {
+ nodes = append(nodes, childID)
+ } else {
+ processed := h.push(nodes, *lastFilename)
+ nodes = MultiNode{childID}
+ lastFilename = &filename
+ if actualLength != length {
+ length = actualLength
+ count = 1
+ } else if processed {
+ if count++; count > h.count {
+ lastFilename = nil
+ nodes = nil
+ length = actualLength + 1
+ count = 0
+ c.Seek(binary.LittleEndian.AppendUint16(prefix, length))
+ c.Prev() // c.Next() will be performed by the for loop
+ }
+ }
+ }
+ }
+
+ if len(nodes) != 0 && lastFilename != nil {
+ h.push(nodes, *lastFilename)
+ }
+}
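A note on the iteration order relied upon above: for a fixed attribute, keys in
the 'i' index sort by the two little-endian length bytes of the filename first
and by the filename bytes second (numeric length order for names shorter than
256 bytes). That is why fillSortedChildren tracks a current length group and,
once more than h.count names of that length have made it into the heap, seeks
straight to the start of the next length group instead of scanning out the
current one.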
+
+// TreeGetChildren implements the Forest interface.
+func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeGetChildren", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetChildren",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
+ ),
+ )
+ defer span.End()
+
+ t.modeMtx.RLock()
+ defer t.modeMtx.RUnlock()
+
+ if t.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ var result []NodeInfo
+
+ err := t.db.View(func(tx *bbolt.Tx) error {
+ treeRoot := tx.Bucket(bucketName(cid, treeID))
+ if treeRoot == nil {
+ return ErrTreeNotFound
+ }
+
+ b := treeRoot.Bucket(dataBucket)
+
+ var err error
+ result, err = t.getChildren(b, []Node{nodeID})
+ return err
+ })
+ success = err == nil
+ return result, metaerr.Wrap(err)
+}
+
+func (t *boltForest) getChildren(b *bbolt.Bucket, nodeIDs MultiNode) ([]NodeInfo, error) {
+ var result []NodeInfo
+
+ key := make([]byte, 9)
+ for _, nodeID := range nodeIDs {
+ key[0] = 'c'
+ binary.LittleEndian.PutUint64(key[1:], nodeID)
+
+ c := b.Cursor()
+ for k, _ := c.Seek(key); len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == nodeID; k, _ = c.Next() {
+ childID := binary.LittleEndian.Uint64(k[9:])
+ childInfo, err := t.getChildInfo(b, key, childID)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, childInfo)
+ }
+ }
+ return result, nil
}
// TreeList implements the Forest interface.
-func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) {
+func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeList", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeList",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -778,14 +1354,31 @@ func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) {
return nil
})
if err != nil {
- return nil, fmt.Errorf("could not list trees: %w", err)
+ return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
}
-
+ success = true
return ids, nil
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error) {
+func (t *boltForest) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeGetOpLog", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetOpLog",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", strconv.FormatUint(height, 10)),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -810,12 +1403,28 @@ func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (
}
return nil
})
-
- return lm, err
+ success = err == nil
+ return lm, metaerr.Wrap(err)
}
// TreeDrop implements the pilorama.Forest interface.
-func (t *boltForest) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (t *boltForest) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeDrop", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeDrop",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -825,16 +1434,17 @@ func (t *boltForest) TreeDrop(cid cidSDK.ID, treeID string) error {
return ErrReadOnlyMode
}
- return t.db.Batch(func(tx *bbolt.Tx) error {
+ err := metaerr.Wrap(t.db.Batch(func(tx *bbolt.Tx) error {
if treeID == "" {
c := tx.Cursor()
prefix := make([]byte, 32)
cid.Encode(prefix)
- for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() {
+ for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Seek(prefix) {
err := tx.DeleteBucket(k)
if err != nil {
return err
}
+ _, _ = c.First() // rewind the cursor to the root page
}
return nil
}
@@ -843,10 +1453,103 @@ func (t *boltForest) TreeDrop(cid cidSDK.ID, treeID string) error {
return ErrTreeNotFound
}
return err
- })
+ }))
+ success = err == nil
+ return err
}
-func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) {
+// TreeListTrees implements ForestStorage.
+func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ t.metrics.AddMethodDuration("TreeListTrees", time.Since(startedAt), success)
+ }()
+
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeListTrees")
+ defer span.End()
+
+ t.modeMtx.RLock()
+ defer t.modeMtx.RUnlock()
+
+ if t.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ batchSize := prm.BatchSize
+ if batchSize <= 0 {
+ batchSize = treeListTreesBatchSizeDefault
+ }
+ var res TreeListTreesResult
+ err := metaerr.Wrap(t.db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ checkNextPageToken := true
+ for k, _ := c.Seek(prm.NextPageToken); k != nil; k, _ = c.Next() {
+ if bytes.Equal(k, dataBucket) || bytes.Equal(k, logBucket) {
+ continue
+ }
+
+ if checkNextPageToken && bytes.Equal(k, prm.NextPageToken) {
+ checkNextPageToken = false
+ continue
+ }
+
+ var contID cidSDK.ID
+ if err := contID.Decode(k[:32]); err != nil {
+ return fmt.Errorf("decode container ID: %w", err)
+ }
+ res.Items = append(res.Items, ContainerIDTreeID{
+ CID: contID,
+ TreeID: string(k[32:]),
+ })
+
+ if len(res.Items) == batchSize {
+ res.NextPageToken = bytes.Clone(k)
+ break
+ }
+ }
+ return nil
+ }))
+ success = err == nil
+ if err != nil {
+ return nil, err
+ }
+ return &res, nil
+}
+
+func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) {
+ c := bTree.Cursor()
+
+ var curNodes []Node
+ nextNodes := []Node{RootID}
+ var attrKey []byte
+
+ for i := range path {
+ curNodes, nextNodes = nextNodes, curNodes[:0]
+ for j := range curNodes {
+ attrKey = internalKey(attrKey, attr, path[i], curNodes[j], 0)
+ attrKey = attrKey[:len(attrKey)-8]
+
+ childKey, value := c.Seek(attrKey)
+ for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) {
+ if len(value) == 1 && value[0] == 1 {
+ nextNodes = append(nextNodes, binary.LittleEndian.Uint64(childKey[len(childKey)-8:]))
+ }
+ childKey, value = c.Next()
+ }
+ }
+
+ if len(nextNodes) == 0 {
+ return i, curNodes
+ }
+ }
+
+ return len(path), nextNodes
+}
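Unlike getPathPrefix below, this traversal keeps every node matching a path
component rather than just the first one: duplicate directory entries can exist
under one parent (e.g. after concurrent creates), so a whole frontier advances
level by level. The swap `curNodes, nextNodes = nextNodes, curNodes[:0]` reuses
the previous level's backing array to avoid allocations.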
+
+func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) {
c := bTree.Cursor()
var curNode Node
@@ -866,10 +1569,10 @@ loop:
childKey, value = c.Next()
}
- return i, curNode, nil
+ return i, curNode
}
- return len(path), curNode, nil
+ return len(path), curNode
}
func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
@@ -879,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
lm.Child = binary.LittleEndian.Uint64(data)
lm.Parent = binary.LittleEndian.Uint64(data[8:])
- return lm.Meta.FromBytes(data[16:])
+ return lm.FromBytes(data[16:])
}
func (t *boltForest) logToBytes(lm *Move) []byte {
w := io.NewBufBinWriter()
- size := 8 + 8 + lm.Meta.Size() + 1
+ size := 8 + 8 + lm.Size() + 1
// if lm.HasOld {
// size += 8 + lm.Old.Meta.Size()
// }
@@ -892,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
w.Grow(size)
w.WriteU64LE(lm.Child)
w.WriteU64LE(lm.Parent)
- lm.Meta.EncodeBinary(w.BinWriter)
+ lm.EncodeBinary(w.BinWriter)
// w.WriteBool(lm.HasOld)
// if lm.HasOld {
// w.WriteU64LE(lm.Old.Parent)
@@ -946,7 +1649,17 @@ func childrenKey(key []byte, child, parent Node) []byte {
key[0] = 'c'
binary.LittleEndian.PutUint64(key[1:], parent)
binary.LittleEndian.PutUint64(key[9:], child)
- return key[:17]
+ return key[:childrenKeySize]
+}
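Byte layout produced by childrenKey, for reference (hypothetical parent=1,
child=2; all integers little-endian):

    // index: 0    1..8 (parent)              9..16 (child)
    // bytes: 'c'  01 00 00 00 00 00 00 00    02 00 00 00 00 00 00 00
    // A 9-byte Seek on 'c'+parent therefore visits exactly the children of parent.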
+
+func internalKeyPrefix(key []byte, k string) []byte {
+ key = key[:0]
+ key = append(key, 'i')
+
+ l := len(k)
+ key = binary.LittleEndian.AppendUint16(key, uint16(l))
+ key = append(key, k...)
+ return key
}
// 'i' + attribute name (string) + attribute value (string) + parent (id) + node (id) -> 0/1.
@@ -956,22 +1669,13 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte {
key = make([]byte, 0, size)
}
- key = key[:0]
- key = append(key, 'i')
+ key = internalKeyPrefix(key, k)
- l := len(k)
- key = append(key, byte(l), byte(l>>8))
- key = append(key, k...)
-
- l = len(v)
- key = append(key, byte(l), byte(l>>8))
+ l := len(v)
+ key = binary.LittleEndian.AppendUint16(key, uint16(l))
key = append(key, v...)
- var raw [8]byte
- binary.LittleEndian.PutUint64(raw[:], parent)
- key = append(key, raw[:]...)
-
- binary.LittleEndian.PutUint64(raw[:], node)
- key = append(key, raw[:]...)
+ key = binary.LittleEndian.AppendUint64(key, parent)
+ key = binary.LittleEndian.AppendUint64(key, node)
return key
}
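And the corresponding 'i' key assembled by internalKey, assuming the attribute
is AttributeFilename ("FileName"), the value is "a.txt", parent is 1 and node
is 2 (all values hypothetical, lengths little-endian u16):

    // 'i' | 08 00 | "FileName" | 05 00 | "a.txt" | parent (8B LE) | node (8B LE)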
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index 743096c816..ebfd0bcc08 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -1,17 +1,23 @@
package pilorama
import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
+var errInvalidKeyFormat = errors.New("invalid format: key must be cid and treeID")
+
// memoryForest represents multiple replicating trees sharing a single storage.
type memoryForest struct {
// treeMap maps tree identifier (container ID + name) to the replicated log.
- treeMap map[string]*state
+ treeMap map[string]*memoryTree
}
var _ Forest = (*memoryForest)(nil)
@@ -20,12 +26,12 @@ var _ Forest = (*memoryForest)(nil)
// TODO: this function will eventually be removed and is here for debugging.
func NewMemoryForest() ForestStorage {
return &memoryForest{
- treeMap: make(map[string]*state),
+ treeMap: make(map[string]*memoryTree),
}
}
// TreeMove implements the Forest interface.
-func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move, error) {
+func (f *memoryForest) TreeMove(_ context.Context, d CIDDescriptor, treeID string, op *Move) (*Move, error) {
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -33,7 +39,7 @@ func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move
fullID := d.CID.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
- s = newState()
+ s = newMemoryTree()
f.treeMap[fullID] = s
}
@@ -48,7 +54,7 @@ func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move
}
// TreeAddByPath implements the Forest interface.
-func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) {
+func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) {
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -59,7 +65,7 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string
fullID := d.CID.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
- s = newState()
+ s = newMemoryTree()
f.treeMap[fullID] = s
}
@@ -70,7 +76,8 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string
Parent: node,
Meta: Meta{
Time: s.timestamp(d.Position, d.Size),
- Items: []KeyValue{{Key: attr, Value: []byte(path[j])}}},
+ Items: []KeyValue{{Key: attr, Value: []byte(path[j])}},
+ },
Child: s.findSpareID(),
})
lm[j-i] = op.Move
@@ -78,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string
s.operations = append(s.operations, op)
}
- mCopy := make([]KeyValue, len(m))
- copy(mCopy, m)
+ mCopy := slices.Clone(m)
op := s.do(&Move{
Parent: node,
Meta: Meta{
@@ -88,42 +94,51 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string
},
Child: s.findSpareID(),
})
+ s.operations = append(s.operations, op)
lm[len(lm)-1] = op.Move
return lm, nil
}
// TreeApply implements the Forest interface.
-func (f *memoryForest) TreeApply(d CIDDescriptor, treeID string, op *Move, _ bool) error {
- if !d.checkValid() {
- return ErrInvalidCIDDescriptor
- }
-
- fullID := d.CID.String() + "/" + treeID
+func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, op *Move, _ bool) error {
+ fullID := cnr.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
- s = newState()
+ s = newMemoryTree()
f.treeMap[fullID] = s
}
return s.Apply(op)
}
-func (f *memoryForest) Init() error {
+func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error {
+ for _, op := range ops {
+ if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil {
+ return err
+ }
+ }
return nil
}
-func (f *memoryForest) Open(bool) error {
+func (f *memoryForest) Init(context.Context) error {
return nil
}
-func (f *memoryForest) SetMode(mode.Mode) error {
+
+func (f *memoryForest) Open(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) Close() error {
+
+func (f *memoryForest) SetMode(context.Context, mode.Mode) error {
return nil
}
+func (f *memoryForest) Close(context.Context) error {
+ return nil
+}
+func (f *memoryForest) SetParentID(string) {}
+
// TreeGetByPath implements the Forest interface.
-func (f *memoryForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
+func (f *memoryForest) TreeGetByPath(_ context.Context, cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
if !isAttributeInternal(attr) {
return nil, ErrNotPathAttribute
}
@@ -134,40 +149,93 @@ func (f *memoryForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string,
return nil, ErrTreeNotFound
}
- return s.get(attr, path, latest), nil
+ return s.getByPath(attr, path, latest), nil
}
// TreeGetMeta implements the Forest interface.
-func (f *memoryForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
+func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
return Meta{}, 0, ErrTreeNotFound
}
- return s.getMeta(nodeID), s.infoMap[nodeID].Parent, nil
+ return s.infoMap[nodeID].Meta, s.infoMap[nodeID].Parent, nil
+}
+
+// TreeSortedByFilename implements the Forest interface.
+func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
+ fullID := cid.String() + "/" + treeID
+ s, ok := f.treeMap[fullID]
+ if !ok {
+ return nil, start, ErrTreeNotFound
+ }
+ if count == 0 {
+ return nil, start, nil
+ }
+
+ var res []NodeInfo
+
+ for _, nodeID := range nodeIDs {
+ children := s.getChildren(nodeID)
+ for _, childID := range children {
+ var found bool
+ for _, kv := range s.infoMap[childID].Meta.Items {
+ if kv.Key == AttributeFilename {
+ found = true
+ break
+ }
+ }
+ if !found {
+ continue
+ }
+ res = append(res, NodeInfo{
+ ID: childID,
+ Meta: s.infoMap[childID].Meta,
+ ParentID: s.infoMap[childID].Parent,
+ })
+ }
+ }
+ if len(res) == 0 {
+ return nil, start, nil
+ }
+
+ sortByFilename(res)
+
+ r := mergeNodeInfos(res)
+ for i := range r {
+ if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() {
+ finish := min(len(res), i+count)
+ last := string(findAttr(r[finish-1].Meta, AttributeFilename))
+ return r[i:finish], NewCursor(last, 0), nil
+ }
+ }
+ last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
+ return nil, NewCursor(last, 0), nil
}
// TreeGetChildren implements the Forest interface.
-func (f *memoryForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) {
+func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID string, nodeID Node) ([]NodeInfo, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
return nil, ErrTreeNotFound
}
- children, ok := s.childMap[nodeID]
- if !ok {
- return nil, nil
+ children := s.getChildren(nodeID)
+ res := make([]NodeInfo, 0, len(children))
+ for _, childID := range children {
+ res = append(res, NodeInfo{
+ ID: childID,
+ Meta: s.infoMap[childID].Meta,
+ ParentID: s.infoMap[childID].Parent,
+ })
}
-
- res := make([]Node, len(children))
- copy(res, children)
return res, nil
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (f *memoryForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error) {
+func (f *memoryForest) TreeGetOpLog(_ context.Context, cid cid.ID, treeID string, height uint64) (Move, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -184,7 +252,7 @@ func (f *memoryForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64)
}
// TreeDrop implements the pilorama.Forest interface.
-func (f *memoryForest) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (f *memoryForest) TreeDrop(_ context.Context, cid cid.ID, treeID string) error {
cidStr := cid.String()
if treeID == "" {
for k := range f.treeMap {
@@ -204,7 +272,7 @@ func (f *memoryForest) TreeDrop(cid cidSDK.ID, treeID string) error {
}
// TreeList implements the pilorama.Forest interface.
-func (f *memoryForest) TreeList(cid cidSDK.ID) ([]string, error) {
+func (f *memoryForest) TreeList(_ context.Context, cid cid.ID) ([]string, error) {
var res []string
cidStr := cid.EncodeToString()
@@ -220,15 +288,24 @@ func (f *memoryForest) TreeList(cid cidSDK.ID) ([]string, error) {
return res, nil
}
+func (f *memoryForest) TreeHeight(_ context.Context, cid cid.ID, treeID string) (uint64, error) {
+ fullID := cid.EncodeToString() + "/" + treeID
+ tree, ok := f.treeMap[fullID]
+ if !ok {
+ return 0, ErrTreeNotFound
+ }
+ return tree.operations[len(tree.operations)-1].Time, nil
+}
+
// TreeExists implements the pilorama.Forest interface.
-func (f *memoryForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
+func (f *memoryForest) TreeExists(_ context.Context, cid cid.ID, treeID string) (bool, error) {
fullID := cid.EncodeToString() + "/" + treeID
_, ok := f.treeMap[fullID]
return ok, nil
}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (f *memoryForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+func (f *memoryForest) TreeUpdateLastSyncHeight(_ context.Context, cid cid.ID, treeID string, height uint64) error {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {
@@ -239,7 +316,7 @@ func (f *memoryForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, he
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (f *memoryForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (f *memoryForest) TreeLastSyncHeight(_ context.Context, cid cid.ID, treeID string) (uint64, error) {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {
@@ -247,3 +324,77 @@ func (f *memoryForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64,
}
return t.syncHeight, nil
}
+
+// TreeListTrees implements Forest.
+func (f *memoryForest) TreeListTrees(_ context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error) {
+ batchSize := prm.BatchSize
+ if batchSize <= 0 {
+ batchSize = treeListTreesBatchSizeDefault
+ }
+ tmpSlice := make([]string, 0, len(f.treeMap))
+ for k := range f.treeMap {
+ tmpSlice = append(tmpSlice, k)
+ }
+ sort.Strings(tmpSlice)
+ var idx int
+ if len(prm.NextPageToken) > 0 {
+ last := string(prm.NextPageToken)
+ idx, _ = sort.Find(len(tmpSlice), func(i int) int {
+ return -1 * strings.Compare(tmpSlice[i], last)
+ })
+ if idx == len(tmpSlice) {
+ return &TreeListTreesResult{}, nil
+ }
+ if tmpSlice[idx] == last {
+ idx++
+ }
+ }
+
+ var result TreeListTreesResult
+ for idx < len(tmpSlice) {
+ cidAndTree := strings.Split(tmpSlice[idx], "/")
+ if len(cidAndTree) != 2 {
+ return nil, errInvalidKeyFormat
+ }
+ var contID cid.ID
+ if err := contID.DecodeString(cidAndTree[0]); err != nil {
+ return nil, fmt.Errorf("invalid format: %w", err)
+ }
+
+ result.Items = append(result.Items, ContainerIDTreeID{
+ CID: contID,
+ TreeID: cidAndTree[1],
+ })
+
+ if len(result.Items) == batchSize {
+ result.NextPageToken = []byte(tmpSlice[idx])
+ break
+ }
+ idx++
+ }
+ return &result, nil
+}
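How the page-token resume above behaves on toy data (the keys only need to be
sorted; real ones follow the "cid/treeID" convention):

    keys := []string{"a/t1", "b/t1", "c/t2"} // sorted, like tmpSlice
    last := "b/t1"                           // string(prm.NextPageToken)
    idx, _ := sort.Find(len(keys), func(i int) int {
        return -1 * strings.Compare(keys[i], last)
    })
    // idx == 1 and keys[idx] == last, so listing resumes at idx+1 ("c/t2").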
+
+// TreeApplyStream implements ForestStorage.
+func (f *memoryForest) TreeApplyStream(ctx context.Context, cnr cid.ID, treeID string, source <-chan *Move) error {
+ fullID := cnr.String() + "/" + treeID
+ s, ok := f.treeMap[fullID]
+ if !ok {
+ s = newMemoryTree()
+ f.treeMap[fullID] = s
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case m, ok := <-source:
+ if !ok {
+ return nil
+ }
+ if e := s.Apply(m); e != nil {
+ return e
+ }
+ }
+ }
+}
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 9fe372b36f..844084c552 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1,55 +1,52 @@
package pilorama
import (
+ "bytes"
+ "context"
+ "crypto/rand"
"fmt"
- "math/rand"
- "os"
+ mrand "math/rand"
"path/filepath"
+ "slices"
"strconv"
+ "strings"
"sync"
"testing"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/google/uuid"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
)
var providers = []struct {
name string
- construct func(t testing.TB, opts ...Option) Forest
+ construct func(t testing.TB, opts ...Option) ForestStorage
}{
- {"inmemory", func(t testing.TB, _ ...Option) Forest {
+ {"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
f := NewMemoryForest()
- require.NoError(t, f.Open(false))
- require.NoError(t, f.Init())
- t.Cleanup(func() {
- require.NoError(t, f.Close())
- })
-
+ require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, f.Init(context.Background()))
return f
}},
- {"bbolt", func(t testing.TB, opts ...Option) Forest {
- // Use `os.TempDir` because we construct multiple times in the same test.
- tmpDir, err := os.MkdirTemp(os.TempDir(), "*")
- require.NoError(t, err)
-
+ {"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
f := NewBoltForest(
append([]Option{
- WithPath(filepath.Join(tmpDir, "test.db")),
- WithMaxBatchSize(1)}, opts...)...)
- require.NoError(t, f.Open(false))
- require.NoError(t, f.Init())
- t.Cleanup(func() {
- require.NoError(t, f.Close())
- require.NoError(t, os.RemoveAll(tmpDir))
- })
+ WithPath(filepath.Join(t.TempDir(), "test.db")),
+ WithMaxBatchSize(1),
+ }, opts...)...)
+ require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, f.Init(context.Background()))
return f
}},
}
func testMeta(t *testing.T, f Forest, cid cidSDK.ID, treeID string, nodeID, parentID Node, expected Meta) {
- actualMeta, actualParent, err := f.TreeGetMeta(cid, treeID, nodeID)
+ actualMeta, actualParent, err := f.TreeGetMeta(context.Background(), cid, treeID, nodeID)
require.NoError(t, err)
require.Equal(t, parentID, actualParent)
require.Equal(t, expected, actualMeta)
@@ -63,21 +60,24 @@ func TestForest_TreeMove(t *testing.T) {
}
}
-func testForestTreeMove(t *testing.T, s Forest) {
+func testForestTreeMove(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte("file.txt")}}
- lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ {Key: AttributeFilename, Value: []byte("file.txt")},
+ }
+ lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 3, len(lm))
nodeID := lm[2].Child
t.Run("invalid descriptor", func(t *testing.T) {
- _, err = s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, &Move{
+ _, err = s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, &Move{
Parent: lm[1].Child,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@@ -85,7 +85,7 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
t.Run("same parent, update meta", func(t *testing.T) {
- res, err := s.TreeMove(d, treeID, &Move{
+ res, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: lm[1].Child,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@@ -93,12 +93,12 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.NoError(t, err)
require.Equal(t, res.Child, nodeID)
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{nodeID}, nodes)
})
t.Run("different parent", func(t *testing.T) {
- res, err := s.TreeMove(d, treeID, &Move{
+ res, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: RootID,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@@ -106,11 +106,11 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.NoError(t, err)
require.Equal(t, res.Child, nodeID)
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.True(t, len(nodes) == 0)
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{nodeID}, nodes)
})
@@ -124,13 +124,15 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
}
}
-func testForestTreeGetChildren(t *testing.T, s Forest) {
+func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
treeAdd := func(t *testing.T, child, parent Node) {
- _, err := s.TreeMove(d, treeID, &Move{
+ _, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: parent,
Child: child,
})
@@ -151,16 +153,23 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
treeAdd(t, 2, 0)
treeAdd(t, 7, 0)
- testGetChildren := func(t *testing.T, nodeID Node, expected []Node) {
- actual, err := s.TreeGetChildren(cid, treeID, nodeID)
+ testGetChildren := func(t *testing.T, nodeID Node, expected []NodeInfo) {
+ actual, err := s.TreeGetChildren(context.Background(), cid, treeID, nodeID)
require.NoError(t, err)
require.ElementsMatch(t, expected, actual)
}
- testGetChildren(t, 0, []uint64{10, 2, 7})
- testGetChildren(t, 10, []uint64{3, 6})
+ testGetChildren(t, 0, []NodeInfo{
+ {ID: 10, Meta: Meta{Time: 1, Items: []KeyValue{}}},
+ {ID: 2, Meta: Meta{Time: 5, Items: []KeyValue{}}},
+ {ID: 7, Meta: Meta{Time: 6, Items: []KeyValue{}}},
+ })
+ testGetChildren(t, 10, []NodeInfo{
+ {ID: 3, ParentID: 10, Meta: Meta{Time: 2, Items: []KeyValue{}}},
+ {ID: 6, ParentID: 10, Meta: Meta{Time: 3, Items: []KeyValue{}}},
+ })
testGetChildren(t, 3, nil)
- testGetChildren(t, 6, []uint64{11})
+ testGetChildren(t, 6, []NodeInfo{{ID: 11, ParentID: 6, Meta: Meta{Time: 4, Items: []KeyValue{}}}})
testGetChildren(t, 11, nil)
testGetChildren(t, 2, nil)
testGetChildren(t, 7, nil)
@@ -168,11 +177,273 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
testGetChildren(t, 42, nil)
})
t.Run("missing tree", func(t *testing.T) {
- _, err := s.TreeGetChildren(cid, treeID+"123", 0)
+ _, err := s.TreeGetChildren(context.Background(), cid, treeID+"123", 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
+func BenchmarkForestSortedIteration(b *testing.B) {
+ for i := range providers {
+ if providers[i].name == "inmemory" {
+ continue
+ }
+
+ cnr := cidtest.ID()
+ treeID := "version"
+ f := providers[i].construct(b)
+
+ const total = 100_000
+ d := CIDDescriptor{cnr, 0, 1}
+ for i := range total {
+ u, err := uuid.NewRandom()
+ if err != nil {
+ b.FailNow()
+ }
+
+ _, err = f.TreeMove(context.Background(), d, treeID, &Move{
+ Parent: RootID,
+ Child: RootID + Node(i+1),
+ Meta: Meta{
+ Time: Timestamp(i + 1),
+ Items: []KeyValue{{
+ Key: AttributeFilename, Value: []byte(u.String()),
+ }},
+ },
+ })
+ if err != nil {
+ b.FailNow()
+ }
+ }
+
+ b.Run(providers[i].name+",root", func(b *testing.B) {
+ for range b.N {
+ res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100)
+ if err != nil || len(res) != 100 {
+ b.Fatalf("err %v, count %d", err, len(res))
+ }
+ }
+ })
+ b.Run(providers[i].name+",leaf", func(b *testing.B) {
+ for range b.N {
+ res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100)
+ if err != nil || len(res) != 0 {
+ b.FailNow()
+ }
+ }
+ })
+ }
+}
+
+// The issue we call "BugWithSkip" is easiest to understand when filenames are
+// monotonically increasing numbers: the lexicographically sorted list then interleaves
+// filenames of different lengths, and the bug manifests when the listing switches
+// between lengths. Thus this test contains numbers from 1 to 2000 and a batch size of 10.
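+// For instance, the byte-wise sorted order of {"19", "2", "20"} is "19" < "2" < "20",
+// so a batch boundary can fall exactly where the filename length changes.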
+func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
+ for i := range providers {
+ t.Run(providers[i].name, func(t *testing.T) {
+ testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t))
+ })
+ }
+}
+
+func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
+ cid := cidtest.ID()
+ d := CIDDescriptor{cid, 0, 1}
+ treeID := "version"
+ treeAdd := func(t *testing.T, ts int, filename string) {
+ _, err := s.TreeMove(context.Background(), d, treeID, &Move{
+ Child: RootID + uint64(ts),
+ Parent: RootID,
+ Meta: Meta{
+ Time: Timestamp(ts),
+ Items: []KeyValue{
+ {Key: AttributeFilename, Value: []byte(filename)},
+ },
+ },
+ })
+ require.NoError(t, err)
+ }
+
+ const count = 2000
+ treeAdd(t, 1, "")
+ for i := 1; i < count; i++ {
+ treeAdd(t, i+1, strconv.Itoa(i+1))
+ }
+
+ var result []MultiNodeInfo
+ treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
+ res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
+ require.NoError(t, err)
+ result = append(result, res...)
+ return cursor
+ }
+
+ const batchSize = 10
+ last := treeAppend(t, nil, batchSize)
+ for i := 1; i < count/batchSize; i++ {
+ last = treeAppend(t, last, batchSize)
+ }
+ require.Len(t, result, count)
+ require.True(t, slices.IsSortedFunc(result, func(a, b MultiNodeInfo) int {
+ filenameA := findAttr(a.Meta, AttributeFilename)
+ filenameB := findAttr(b.Meta, AttributeFilename)
+ return bytes.Compare(filenameA, filenameB)
+ }))
+}
+
+func TestForest_TreeSortedIteration(t *testing.T) {
+ for i := range providers {
+ t.Run(providers[i].name, func(t *testing.T) {
+ testForestTreeSortedIteration(t, providers[i].construct(t))
+ })
+ }
+}
+
+func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
+ cid := cidtest.ID()
+ d := CIDDescriptor{cid, 0, 1}
+ treeID := "version"
+ treeAdd := func(t *testing.T, ts int, filename string) {
+ _, err := s.TreeMove(context.Background(), d, treeID, &Move{
+ Child: RootID + uint64(ts),
+ Parent: RootID,
+ Meta: Meta{
+ Time: Timestamp(ts),
+ Items: []KeyValue{
+ {Key: AttributeFilename, Value: []byte(filename)},
+ },
+ },
+ })
+ require.NoError(t, err)
+ }
+
+ const count = 9
+ treeAdd(t, 1, "")
+ for i := 1; i < count; i++ {
+ treeAdd(t, i+1, strconv.Itoa(i+1))
+ }
+
+ var result []MultiNodeInfo
+ treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
+ res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
+ require.NoError(t, err)
+ result = append(result, res...)
+ return cursor
+ }
+
+ last := treeAppend(t, nil, 2)
+ last = treeAppend(t, last, 3)
+ last = treeAppend(t, last, 0)
+ last = treeAppend(t, last, 1)
+ _ = treeAppend(t, last, 10)
+
+ require.Len(t, result, count)
+ for i := range result {
+ require.Equal(t, MultiNode{RootID + uint64(i+1)}, result[i].Children)
+ if i == 0 {
+ require.Equal(t, "", string(findAttr(result[i].Meta, AttributeFilename)))
+ } else {
+ require.Equal(t, strconv.Itoa(RootID+i+1), string(findAttr(result[i].Meta, AttributeFilename)))
+ }
+ }
+}
+
+func TestForest_TreeSortedFilename(t *testing.T) {
+ for i := range providers {
+ t.Run(providers[i].name, func(t *testing.T) {
+ testForestTreeSortedByFilename(t, providers[i].construct(t))
+ })
+ }
+}
+
+func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
+ const controlAttr = "control_attr"
+ cid := cidtest.ID()
+ d := CIDDescriptor{cid, 0, 1}
+ treeID := "version"
+
+ treeAddByPath := func(t *testing.T, filename string) {
+ path := strings.Split(filename, "/")
+ _, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, path[:len(path)-1],
+ []KeyValue{
+ {Key: AttributeFilename, Value: []byte(path[len(path)-1])},
+ {Key: controlAttr, Value: []byte(filename)},
+ },
+ )
+ require.NoError(t, err)
+ }
+
+ expectAttributes := func(t *testing.T, attr string, expected []string, res []MultiNodeInfo) {
+ require.Equal(t, len(expected), len(res))
+
+ actual := make([]string, len(res))
+ for i := range actual {
+ actual[i] = string(findAttr(res[i].Meta, attr))
+ }
+ require.Equal(t, expected, actual)
+ }
+
+ items := []string{
+ "a/bbb/ccc",
+ "a/bbb/xxx",
+ "a/bbb/z",
+ "b/bbb/ccc",
+ "b/xxx/z",
+ "c",
+ }
+
+ // Ensure we do not depend on insertion order in any way.
+ mrand.Shuffle(len(items), func(i, j int) {
+ items[i], items[j] = items[j], items[i]
+ })
+ for i := range items {
+ treeAddByPath(t, items[i])
+ }
+
+ getChildren := func(t *testing.T, id MultiNode) []MultiNodeInfo {
+ res, _, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, id, nil, len(items))
+ require.NoError(t, err)
+ return res
+ }
+
+ res := getChildren(t, MultiNode{RootID})
+ expectAttributes(t, AttributeFilename, []string{"a", "b", "c"}, res)
+ expectAttributes(t, controlAttr, []string{"", "", "c"}, res)
+
+ {
+ ra := getChildren(t, res[0].Children)
+ expectAttributes(t, AttributeFilename, []string{"bbb"}, ra)
+ expectAttributes(t, controlAttr, []string{""}, ra)
+
+ rabbb := getChildren(t, ra[0].Children)
+ expectAttributes(t, AttributeFilename, []string{"ccc", "xxx", "z"}, rabbb)
+ expectAttributes(t, controlAttr, []string{"a/bbb/ccc", "a/bbb/xxx", "a/bbb/z"}, rabbb)
+ }
+ {
+ rb := getChildren(t, res[1].Children)
+ expectAttributes(t, AttributeFilename, []string{"bbb", "xxx"}, rb)
+ expectAttributes(t, controlAttr, []string{"", ""}, rb)
+
+ rbbbb := getChildren(t, rb[0].Children)
+ expectAttributes(t, AttributeFilename, []string{"ccc"}, rbbbb)
+ expectAttributes(t, controlAttr, []string{"b/bbb/ccc"}, rbbbb)
+
+ rbxxx := getChildren(t, rb[1].Children)
+ expectAttributes(t, AttributeFilename, []string{"z"}, rbxxx)
+ expectAttributes(t, controlAttr, []string{"b/xxx/z"}, rbxxx)
+ }
+ {
+ rc := getChildren(t, res[2].Children)
+ require.Len(t, rc, 0)
+ }
+}
+
func TestForest_TreeDrop(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
@@ -181,7 +452,9 @@ func TestForest_TreeDrop(t *testing.T) {
}
}
-func testForestTreeDrop(t *testing.T, s Forest) {
+func testForestTreeDrop(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
const cidsSize = 3
var cids [cidsSize]cidSDK.ID
@@ -191,10 +464,10 @@ func testForestTreeDrop(t *testing.T, s Forest) {
cid := cids[0]
t.Run("return nil if not found", func(t *testing.T) {
- require.ErrorIs(t, s.TreeDrop(cid, "123"), ErrTreeNotFound)
+ require.ErrorIs(t, s.TreeDrop(context.Background(), cid, "123"), ErrTreeNotFound)
})
- require.NoError(t, s.TreeDrop(cid, ""))
+ require.NoError(t, s.TreeDrop(context.Background(), cid, ""))
trees := []string{"tree1", "tree2"}
var descs [cidsSize]CIDDescriptor
@@ -203,38 +476,39 @@ func testForestTreeDrop(t *testing.T, s Forest) {
}
d := descs[0]
for i := range trees {
- _, err := s.TreeAddByPath(d, trees[i], AttributeFilename, []string{"path"},
+ _, err := s.TreeAddByPath(context.Background(), d, trees[i], AttributeFilename, []string{"path"},
[]KeyValue{{Key: "TreeName", Value: []byte(trees[i])}})
require.NoError(t, err)
}
- err := s.TreeDrop(cid, trees[0])
+ err := s.TreeDrop(context.Background(), cid, trees[0])
require.NoError(t, err)
- _, err = s.TreeGetByPath(cid, trees[0], AttributeFilename, []string{"path"}, true)
+ _, err = s.TreeGetByPath(context.Background(), cid, trees[0], AttributeFilename, []string{"path"}, true)
require.ErrorIs(t, err, ErrTreeNotFound)
- _, err = s.TreeGetByPath(cid, trees[1], AttributeFilename, []string{"path"}, true)
+ _, err = s.TreeGetByPath(context.Background(), cid, trees[1], AttributeFilename, []string{"path"}, true)
require.NoError(t, err)
for j := range descs {
for i := range trees {
- _, err := s.TreeAddByPath(descs[j], trees[i], AttributeFilename, []string{"path"},
+ _, err := s.TreeAddByPath(context.Background(), descs[j], trees[i], AttributeFilename, []string{"path"},
[]KeyValue{{Key: "TreeName", Value: []byte(trees[i])}})
require.NoError(t, err)
}
}
- list, err := s.TreeList(cid)
+ list, err := s.TreeList(context.Background(), cid)
+ require.NoError(t, err)
require.NotEmpty(t, list)
- require.NoError(t, s.TreeDrop(cid, ""))
+ require.NoError(t, s.TreeDrop(context.Background(), cid, ""))
- list, err = s.TreeList(cid)
+ list, err = s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.Empty(t, list)
for j := 1; j < len(cids); j++ {
- list, err = s.TreeList(cids[j])
+ list, err = s.TreeList(context.Background(), cids[j])
require.NoError(t, err)
require.Equal(t, len(list), len(trees))
}
@@ -248,14 +522,17 @@ func TestForest_TreeAdd(t *testing.T) {
}
}
-func testForestTreeAdd(t *testing.T, s Forest) {
+func testForestTreeAdd(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte("file.txt")}}
+ {Key: AttributeFilename, Value: []byte("file.txt")},
+ }
m := &Move{
Parent: RootID,
Child: RootID,
@@ -263,24 +540,24 @@ func testForestTreeAdd(t *testing.T, s Forest) {
}
t.Run("invalid descriptor", func(t *testing.T) {
- _, err := s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, m)
+ _, err := s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, m)
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
- lm, err := s.TreeMove(d, treeID, m)
+ lm, err := s.TreeMove(context.Background(), d, treeID, m)
require.NoError(t, err)
testMeta(t, s, cid, treeID, lm.Child, lm.Parent, Meta{Time: lm.Time, Items: meta})
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{lm.Child}, nodes)
t.Run("other trees are unaffected", func(t *testing.T) {
- _, err := s.TreeGetByPath(cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false)
+ _, err := s.TreeGetByPath(context.Background(), cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false)
require.ErrorIs(t, err, ErrTreeNotFound)
- _, _, err = s.TreeGetMeta(cid, treeID+"123", 0)
+ _, _, err = s.TreeGetMeta(context.Background(), cid, treeID+"123", 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@@ -293,25 +570,28 @@ func TestForest_TreeAddByPath(t *testing.T) {
}
}
-func testForestTreeAddByPath(t *testing.T, s Forest) {
+func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte("file.txt")}}
+ {Key: AttributeFilename, Value: []byte("file.txt")},
+ }
t.Run("invalid descriptor", func(t *testing.T) {
- _, err := s.TreeAddByPath(CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta)
+ _, err := s.TreeAddByPath(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta)
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
t.Run("invalid attribute", func(t *testing.T) {
- _, err := s.TreeAddByPath(d, treeID, AttributeVersion, []string{"yyy"}, meta)
+ _, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeVersion, []string{"yyy"}, meta)
require.ErrorIs(t, err, ErrNotPathAttribute)
})
- lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 3, len(lm))
testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("path")}}})
@@ -320,8 +600,12 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
firstID := lm[2].Child
testMeta(t, s, cid, treeID, firstID, lm[2].Parent, Meta{Time: lm[2].Time, Items: meta})
+ // TreeAddByPath must return operations in increasing time order.
+ require.True(t, lm[0].Time < lm[1].Time)
+ require.True(t, lm[1].Time < lm[2].Time)
+
meta[0].Value = []byte("YYY")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -330,19 +614,19 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
t.Run("get versions", func(t *testing.T) {
// All versions.
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{firstID, secondID}, nodes)
// Latest version.
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true)
require.NoError(t, err)
require.Equal(t, []Node{secondID}, nodes)
})
meta[0].Value = []byte("ZZZ")
meta[1].Value = []byte("cat.jpg")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "dir"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "dir"}, meta)
require.NoError(t, err)
require.Equal(t, 2, len(lm))
testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("dir")}}})
@@ -351,7 +635,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
t.Run("create internal nodes", func(t *testing.T) {
meta[0].Value = []byte("SomeValue")
meta[1].Value = []byte("another")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -359,7 +643,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
meta[0].Value = []byte("Leaf")
meta[1].Value = []byte("file.txt")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "another"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "another"}, meta)
require.NoError(t, err)
require.Equal(t, 2, len(lm))
@@ -371,15 +655,16 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
testMeta(t, s, cid, treeID, oldMove.Child, oldMove.Parent,
Meta{Time: oldMove.Time, Items: []KeyValue{
{AttributeVersion, []byte("SomeValue")},
- {AttributeFilename, []byte("another")}}})
+ {AttributeFilename, []byte("another")},
+ }})
t.Run("get by path", func(t *testing.T) {
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another"}, false)
require.NoError(t, err)
require.Equal(t, 2, len(nodes))
require.ElementsMatch(t, []Node{lm[0].Child, oldMove.Child}, nodes)
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
require.Equal(t, lm[1].Child, nodes[0])
@@ -389,12 +674,13 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
t.Run("empty component", func(t *testing.T) {
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte{}}}
- lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ {Key: AttributeFilename, Value: []byte{}},
+ }
+ lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
- nodes, err := s.TreeGetByPath(d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
require.Equal(t, lm[0].Child, nodes[0])
@@ -409,23 +695,12 @@ func TestForest_Apply(t *testing.T) {
}
}
-func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
treeID := "version"
- t.Run("invalid descriptor", func(t *testing.T) {
- s := constructor(t)
- err := s.TreeApply(CIDDescriptor{cid, 0, 0}, treeID, &Move{
- Child: 10,
- Parent: 0,
- Meta: Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}},
- }, false)
- require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
- })
-
testApply := func(t *testing.T, s Forest, child, parent Node, meta Meta) {
- require.NoError(t, s.TreeApply(d, treeID, &Move{
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{
Child: child,
Parent: parent,
Meta: meta,
@@ -434,6 +709,8 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
t.Run("add a child, then insert a parent removal", func(t *testing.T) {
s := constructor(t)
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
meta := Meta{Time: 3, Items: []KeyValue{{"child", []byte{3}}}}
@@ -445,6 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
s := constructor(t)
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
testApply(t, s, 11, 10, meta)
@@ -455,6 +733,87 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
}
+func TestForest_ApplySameOperation(t *testing.T) {
+ for i := range providers {
+ t.Run(providers[i].name, func(t *testing.T) {
+ parallel := providers[i].name != "inmemory"
+ testForestApplySameOperation(t, providers[i].construct, parallel)
+ })
+ }
+}
+
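+// testForestApplySameOperation checks that applying the same operation multiple times,
+// possibly concurrently, is idempotent: the resulting tree must match the one obtained
+// by applying every operation exactly once.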
+func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, parallel bool) {
+ cid := cidtest.ID()
+ treeID := "version"
+
+ batchSize := 3
+ ctx := context.Background()
+ errG, _ := errgroup.WithContext(ctx)
+ if !parallel {
+ batchSize = 1
+ errG.SetLimit(1)
+ }
+
+ meta := []Meta{
+ {Time: 1, Items: []KeyValue{{AttributeFilename, []byte("1")}, {"attr", []byte{1}}}},
+ {Time: 2, Items: []KeyValue{{AttributeFilename, []byte("2")}, {"attr", []byte{1}}}},
+ {Time: 3, Items: []KeyValue{{AttributeFilename, []byte("3")}, {"attr", []byte{1}}}},
+ }
+ logs := []Move{
+ {
+ Child: 1,
+ Parent: RootID,
+ Meta: meta[0],
+ },
+ {
+ Child: 2,
+ Parent: 1,
+ Meta: meta[1],
+ },
+ {
+ Child: 1,
+ Parent: 2,
+ Meta: meta[2],
+ },
+ }
+
+ check := func(t *testing.T, s Forest) {
+ testMeta(t, s, cid, treeID, 1, RootID, meta[0])
+ testMeta(t, s, cid, treeID, 2, 1, meta[1])
+
+ nodes, err := s.TreeGetChildren(ctx, cid, treeID, RootID)
+ require.NoError(t, err)
+ require.Equal(t, []NodeInfo{{ID: 1, ParentID: RootID, Meta: meta[0]}}, nodes)
+
+ nodes, err = s.TreeGetChildren(ctx, cid, treeID, 1)
+ require.NoError(t, err)
+ require.Equal(t, []NodeInfo{{ID: 2, ParentID: 1, Meta: meta[1]}}, nodes)
+ }
+
+ t.Run("expected", func(t *testing.T) {
+ s := constructor(t)
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
+ for i := range logs {
+ require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
+ }
+ check(t, s)
+ })
+
+ s := constructor(t, WithMaxBatchSize(batchSize))
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
+ require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
+ for range batchSize {
+ errG.Go(func() error {
+ return s.TreeApply(ctx, cid, treeID, &logs[2], false)
+ })
+ }
+ require.NoError(t, errG.Wait())
+ require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[1], false))
+ check(t, s)
+}
+
func TestForest_GetOpLog(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
@@ -463,9 +822,8 @@ func TestForest_GetOpLog(t *testing.T) {
}
}
-func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
treeID := "version"
logs := []Move{
{
@@ -484,18 +842,19 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
}
s := constructor(t)
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
t.Run("empty log, no panic", func(t *testing.T) {
- _, err := s.TreeGetOpLog(cid, treeID, 0)
+ _, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
for i := range logs {
- require.NoError(t, s.TreeApply(d, treeID, &logs[i], false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &logs[i], false))
}
testGetOpLog := func(t *testing.T, height uint64, m Move) {
- lm, err := s.TreeGetOpLog(cid, treeID, height)
+ lm, err := s.TreeGetOpLog(context.Background(), cid, treeID, height)
require.NoError(t, err)
require.Equal(t, m, lm)
}
@@ -509,7 +868,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
testGetOpLog(t, 261, Move{})
})
t.Run("missing tree", func(t *testing.T) {
- _, err := s.TreeGetOpLog(cid, treeID+"123", 4)
+ _, err := s.TreeGetOpLog(context.Background(), cid, treeID+"123", 4)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@@ -522,30 +881,39 @@ func TestForest_TreeExists(t *testing.T) {
}
}
-func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) Forest) {
+func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
s := constructor(t)
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
- actual, err := s.TreeExists(cid, treeID)
+ actual, err := s.TreeExists(context.Background(), cid, treeID)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
cid := cidtest.ID()
treeID := "version"
- d := CIDDescriptor{cid, 0, 1}
t.Run("empty state, no panic", func(t *testing.T) {
checkExists(t, false, cid, treeID)
})
- require.NoError(t, s.TreeApply(d, treeID, &Move{Parent: 0, Child: 1}, false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{Meta: Meta{Time: 11}, Parent: 0, Child: 1}, false))
checkExists(t, true, cid, treeID)
+
+ height, err := s.TreeHeight(context.Background(), cid, treeID)
+ require.NoError(t, err)
+ require.EqualValues(t, 11, height)
+
checkExists(t, false, cidtest.ID(), treeID) // different CID, same tree
- checkExists(t, false, cid, "another tree") // same CID, different tree
+
+ _, err = s.TreeHeight(context.Background(), cidtest.ID(), treeID)
+ require.ErrorIs(t, err, ErrTreeNotFound)
+
+ checkExists(t, false, cid, "another tree") // same CID, different tree
t.Run("can be removed", func(t *testing.T) {
- require.NoError(t, s.TreeDrop(cid, treeID))
+ require.NoError(t, s.TreeDrop(context.Background(), cid, treeID))
checkExists(t, false, cid, treeID)
})
}
@@ -570,16 +938,18 @@ func TestApplyTricky1(t *testing.T) {
}
treeID := "version"
- d := CIDDescriptor{CID: cidtest.ID(), Position: 0, Size: 1}
+ cid := cidtest.ID()
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
for i := range ops {
- require.NoError(t, s.TreeApply(d, treeID, &ops[i], false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := range expected {
- _, parent, err := s.TreeGetMeta(d.CID, treeID, expected[i].child)
+ _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child)
require.NoError(t, err)
require.Equal(t, expected[i].parent, parent)
}
@@ -631,16 +1001,18 @@ func TestApplyTricky2(t *testing.T) {
}
treeID := "version"
- d := CIDDescriptor{CID: cidtest.ID(), Position: 0, Size: 1}
+ cid := cidtest.ID()
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
for i := range ops {
- require.NoError(t, s.TreeApply(d, treeID, &ops[i], false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := range expected {
- _, parent, err := s.TreeGetMeta(d.CID, treeID, expected[i].child)
+ _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child)
require.NoError(t, err)
require.Equal(t, expected[i].parent, parent)
}
@@ -671,7 +1043,7 @@ func TestForest_ParallelApply(t *testing.T) {
// The operations are guaranteed to be applied and returned sorted by `Time`.
func prepareRandomTree(nodeCount, opCount int) []Move {
ops := make([]Move, nodeCount+opCount)
- for i := 0; i < nodeCount; i++ {
+ for i := range nodeCount {
ops[i] = Move{
Parent: 0,
Meta: Meta{
@@ -686,9 +1058,10 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
rand.Read(ops[i].Meta.Items[1].Value)
}
+ r := mrand.New(mrand.NewSource(time.Now().Unix()))
for i := nodeCount; i < len(ops); i++ {
ops[i] = Move{
- Parent: rand.Uint64() % uint64(nodeCount+12),
+ Parent: r.Uint64() % uint64(nodeCount+12),
Meta: Meta{
Time: Timestamp(i + nodeCount),
Items: []KeyValue{
@@ -696,9 +1069,9 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
{Value: make([]byte, 10)},
},
},
- Child: rand.Uint64() % uint64(nodeCount+10),
+ Child: r.Uint64() % uint64(nodeCount+10),
}
- if rand.Uint32()%5 == 0 {
+ if r.Uint32()%5 == 0 {
ops[i].Parent = TrashID
}
rand.Read(ops[i].Meta.Items[1].Value)
@@ -708,10 +1081,10 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
}
func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
- for i := uint64(0); i < uint64(nodeCount); i++ {
- expectedMeta, expectedParent, err := expected.TreeGetMeta(cid, treeID, i)
+ for i := range uint64(nodeCount) {
+ expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
- actualMeta, actualParent, err := actual.TreeGetMeta(cid, treeID, i)
+ actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
require.Equal(t, expectedParent, actualParent, "node id: %d", i)
require.Equal(t, expectedMeta, actualMeta, "node id: %d", i)
@@ -725,48 +1098,42 @@ func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID
require.True(t, ok)
require.Equal(t, se.operations, sa.operations)
require.Equal(t, se.infoMap, sa.infoMap)
-
- require.Equal(t, len(se.childMap), len(sa.childMap))
- for ck, la := range sa.childMap {
- le, ok := se.childMap[ck]
- require.True(t, ok)
- require.ElementsMatch(t, le, la)
- }
}
require.Equal(t, expected, actual, i)
}
}
}
-func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, batchSize, opCount, iterCount int) {
- rand.Seed(42)
+func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, opCount, iterCount int) {
+ r := mrand.New(mrand.NewSource(42))
const nodeCount = 5
ops := prepareRandomTree(nodeCount, opCount)
cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
treeID := "version"
- expected := constructor(t)
+ expected := constructor(t, WithNoSync(true))
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
+
for i := range ops {
- require.NoError(t, expected.TreeApply(d, treeID, &ops[i], false))
+ require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
- for i := 0; i < iterCount; i++ {
+ for range iterCount {
// Shuffle random operations, leave initialization in place.
- rand.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
+ r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
- actual := constructor(t, WithMaxBatchSize(batchSize))
+ actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true))
wg := new(sync.WaitGroup)
- ch := make(chan *Move, 0)
- for i := 0; i < batchSize; i++ {
+ ch := make(chan *Move)
+ for range batchSize {
wg.Add(1)
go func() {
defer wg.Done()
for op := range ch {
- require.NoError(t, actual.TreeApply(d, treeID, op, false))
+ require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, op, false))
}
}()
}
@@ -778,11 +1145,12 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
wg.Wait()
compareForests(t, expected, actual, cid, treeID, nodeCount)
+ require.NoError(t, actual.Close(context.Background()))
}
}
-func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
- rand.Seed(42)
+func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
+ r := mrand.New(mrand.NewSource(42))
const (
nodeCount = 5
@@ -792,24 +1160,26 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
ops := prepareRandomTree(nodeCount, opCount)
cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
treeID := "version"
- expected := constructor(t)
+ expected := constructor(t, WithNoSync(true))
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
+
for i := range ops {
- require.NoError(t, expected.TreeApply(d, treeID, &ops[i], false))
+ require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
const iterCount = 200
- for i := 0; i < iterCount; i++ {
+ for range iterCount {
// Shuffle random operations, leave initialization in place.
- rand.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
+ r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
- actual := constructor(t)
+ actual := constructor(t, WithNoSync(true))
for i := range ops {
- require.NoError(t, actual.TreeApply(d, treeID, &ops[i], false))
+ require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
+ require.NoError(t, actual.Close(context.Background()))
}
}
@@ -825,17 +1195,20 @@ func BenchmarkApplySequential(b *testing.B) {
b.Run(providers[i].name, func(b *testing.B) {
for _, bs := range batchSizes {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
+ r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
+
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
for i := range ops {
ops[i] = Move{
- Parent: uint64(rand.Intn(benchNodeCount)),
+ Parent: uint64(r.Intn(benchNodeCount)),
Meta: Meta{
Time: Timestamp(i),
Items: []KeyValue{{Value: []byte{0, 1, 2, 3, 4}}},
},
- Child: uint64(rand.Intn(benchNodeCount)),
+ Child: uint64(r.Intn(benchNodeCount)),
}
}
return ops
@@ -858,20 +1231,23 @@ func BenchmarkApplyReorderLast(b *testing.B) {
b.Run(providers[i].name, func(b *testing.B) {
for _, bs := range batchSizes {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
+ r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
+
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
for i := range ops {
ops[i] = Move{
- Parent: uint64(rand.Intn(benchNodeCount)),
+ Parent: uint64(r.Intn(benchNodeCount)),
Meta: Meta{
Time: Timestamp(i),
Items: []KeyValue{{Value: []byte{0, 1, 2, 3, 4}}},
},
- Child: uint64(rand.Intn(benchNodeCount)),
+ Child: uint64(r.Intn(benchNodeCount)),
}
if i != 0 && i%blockSize == 0 {
- for j := 0; j < blockSize/2; j++ {
+ for j := range blockSize / 2 {
ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j]
}
}
@@ -885,14 +1261,11 @@ func BenchmarkApplyReorderLast(b *testing.B) {
}
func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
- rand.Seed(42)
-
ops := genFunc(b.N)
cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
treeID := "version"
ch := make(chan int, b.N)
- for i := 0; i < b.N; i++ {
+ for i := range b.N {
ch <- i
}
@@ -901,7 +1274,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
b.SetParallelism(10)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := s.TreeApply(d, treeID, &ops[<-ch], false); err != nil {
+ if err := s.TreeApply(context.Background(), cid, treeID, &ops[<-ch], false); err != nil {
b.Fatalf("error in `Apply`: %v", err)
}
}
@@ -916,9 +1289,9 @@ func TestTreeGetByPath(t *testing.T) {
}
}
-func testTreeGetByPath(t *testing.T, s Forest) {
+func testTreeGetByPath(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
treeID := "version"
// /
@@ -928,56 +1301,56 @@ func testTreeGetByPath(t *testing.T, s Forest) {
// |- cat1.jpg, Version=XXX (4)
// |- cat1.jpg, Version=YYY (5)
// |- cat2.jpg, Version=ZZZ (6)
- testMove(t, s, 0, 1, 0, d, treeID, "a", "")
- testMove(t, s, 1, 2, 0, d, treeID, "b", "")
- testMove(t, s, 2, 3, 1, d, treeID, "cat1.jpg", "TTT")
- testMove(t, s, 3, 4, 2, d, treeID, "cat1.jpg", "XXX")
- testMove(t, s, 4, 5, 2, d, treeID, "cat1.jpg", "YYY")
- testMove(t, s, 5, 6, 2, d, treeID, "cat2.jpg", "ZZZ")
+ testMove(t, s, 0, 1, 0, cid, treeID, "a", "")
+ testMove(t, s, 1, 2, 0, cid, treeID, "b", "")
+ testMove(t, s, 2, 3, 1, cid, treeID, "cat1.jpg", "TTT")
+ testMove(t, s, 3, 4, 2, cid, treeID, "cat1.jpg", "XXX")
+ testMove(t, s, 4, 5, 2, cid, treeID, "cat1.jpg", "YYY")
+ testMove(t, s, 5, 6, 2, cid, treeID, "cat2.jpg", "ZZZ")
if mf, ok := s.(*memoryForest); ok {
single := mf.treeMap[cid.String()+"/"+treeID]
t.Run("test meta", func(t *testing.T) {
- for i := 0; i < 6; i++ {
+ for i := range 6 {
require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time)
}
})
}
t.Run("invalid attribute", func(t *testing.T) {
- _, err := s.TreeGetByPath(cid, treeID, AttributeVersion, []string{"", "TTT"}, false)
+ _, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeVersion, []string{"", "TTT"}, false)
require.ErrorIs(t, err, ErrNotPathAttribute)
})
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false)
require.NoError(t, err)
require.Equal(t, []Node{4, 5}, nodes)
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false)
require.Equal(t, []Node{3}, nodes)
t.Run("missing child", func(t *testing.T) {
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false)
require.True(t, len(nodes) == 0)
})
t.Run("missing parent", func(t *testing.T) {
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false)
require.True(t, len(nodes) == 0)
})
t.Run("empty path", func(t *testing.T) {
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, nil, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, nil, false)
require.True(t, len(nodes) == 0)
})
}
-func testMove(t *testing.T, s Forest, ts int, node, parent Node, d CIDDescriptor, treeID, filename, version string) {
+func testMove(t *testing.T, s Forest, ts int, node, parent Node, cid cidSDK.ID, treeID, filename, version string) {
items := make([]KeyValue, 1, 2)
items[0] = KeyValue{AttributeFilename, []byte(filename)}
if version != "" {
items = append(items, KeyValue{AttributeVersion, []byte(version)})
}
- require.NoError(t, s.TreeApply(d, treeID, &Move{
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{
Parent: parent,
Child: node,
Meta: Meta{
@@ -995,7 +1368,9 @@ func TestGetTrees(t *testing.T) {
}
}
-func testTreeGetTrees(t *testing.T, s Forest) {
+func testTreeGetTrees(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
+
cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
d := CIDDescriptor{Position: 0, Size: 1}
@@ -1016,7 +1391,7 @@ func testTreeGetTrees(t *testing.T, s Forest) {
d.CID = cid
for _, treeID := range treeIDs[cid] {
- _, err := s.TreeAddByPath(d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil)
+ _, err := s.TreeAddByPath(context.Background(), d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil)
require.NoError(t, err)
}
}
@@ -1024,7 +1399,7 @@ func testTreeGetTrees(t *testing.T, s Forest) {
for _, cid := range cids {
d.CID = cid
- trees, err := s.TreeList(cid)
+ trees, err := s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.ElementsMatch(t, treeIDs[cid], trees)
@@ -1039,43 +1414,100 @@ func TestTreeLastSyncHeight(t *testing.T) {
}
}
-func testTreeLastSyncHeight(t *testing.T, f Forest) {
+func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
+ defer func() { require.NoError(t, f.Close(context.Background())) }()
+
cnr := cidtest.ID()
treeID := "someTree"
t.Run("ErrNotFound if no log operations are stored for a tree", func(t *testing.T) {
- _, err := f.TreeLastSyncHeight(cnr, treeID)
+ _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
- err = f.TreeUpdateLastSyncHeight(cnr, treeID, 1)
+ err = f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 1)
require.ErrorIs(t, err, ErrTreeNotFound)
})
- _, err := f.TreeMove(CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{
+ _, err := f.TreeMove(context.Background(), CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{
Parent: RootID,
Child: 1,
})
require.NoError(t, err)
- h, err := f.TreeLastSyncHeight(cnr, treeID)
+ h, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.NoError(t, err)
require.EqualValues(t, 0, h)
t.Run("separate storages for separate containers", func(t *testing.T) {
- _, err := f.TreeLastSyncHeight(cidtest.ID(), treeID)
+ _, err := f.TreeLastSyncHeight(context.Background(), cidtest.ID(), treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
})
- require.NoError(t, f.TreeUpdateLastSyncHeight(cnr, treeID, 10))
+ require.NoError(t, f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 10))
- h, err = f.TreeLastSyncHeight(cnr, treeID)
+ h, err = f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.NoError(t, err)
require.EqualValues(t, 10, h)
t.Run("removed correctly", func(t *testing.T) {
- require.NoError(t, f.TreeDrop(cnr, treeID))
+ require.NoError(t, f.TreeDrop(context.Background(), cnr, treeID))
- _, err := f.TreeLastSyncHeight(cnr, treeID)
+ _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
+
+func TestForest_ListTrees(t *testing.T) {
+ for i := range providers {
+ t.Run(providers[i].name, func(t *testing.T) {
+ testTreeListTrees(t, providers[i].construct)
+ })
+ }
+}
+
+func testTreeListTrees(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
+ batchSize := 10
+ t.Run("empty", func(t *testing.T) {
+ testTreeListTreesCount(t, constructor, batchSize, 0)
+ })
+ t.Run("count lower than batch size", func(t *testing.T) {
+ testTreeListTreesCount(t, constructor, batchSize, batchSize-1)
+ })
+ t.Run("count equals batch size", func(t *testing.T) {
+ testTreeListTreesCount(t, constructor, batchSize, batchSize)
+ })
+ t.Run("count greater than batch size", func(t *testing.T) {
+ testTreeListTreesCount(t, constructor, batchSize, batchSize+1)
+ })
+ t.Run("count equals multiplied batch size", func(t *testing.T) {
+ testTreeListTreesCount(t, constructor, batchSize, 3*batchSize)
+ })
+ t.Run("count equals multiplied batch size with addition", func(t *testing.T) {
+ testTreeListTreesCount(t, constructor, batchSize, 3*batchSize+batchSize/2)
+ })
+}
+
+func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, count int) {
+ f := constructor(t)
+ var expected []ContainerIDTreeID
+
+ treeIDs := []string{"version", "system", "s", "avada kedavra"}
+ for i := range count {
+ cid := cidtest.ID()
+ treeID := treeIDs[i%len(treeIDs)]
+ expected = append(expected, ContainerIDTreeID{
+ CID: cid,
+ TreeID: treeID,
+ })
+
+ ops := prepareRandomTree(5, 5)
+ for _, op := range ops {
+ require.NoError(t, f.TreeApply(context.Background(), cid, treeID, &op, false))
+ }
+ }
+
+ actual, err := treeListAll(context.Background(), f, batchSize)
+ require.NoError(t, err)
+
+ require.ElementsMatch(t, expected, actual)
+}
diff --git a/pkg/local_object_storage/pilorama/generic_test.go b/pkg/local_object_storage/pilorama/generic_test.go
index cac03c4e19..d6a9f333b4 100644
--- a/pkg/local_object_storage/pilorama/generic_test.go
+++ b/pkg/local_object_storage/pilorama/generic_test.go
@@ -1,23 +1,15 @@
package pilorama
import (
- "os"
"path/filepath"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
)
func TestGeneric(t *testing.T) {
- defer func() { _ = os.RemoveAll(t.Name()) }()
-
- var n int
newPilorama := func(t *testing.T) storagetest.Component {
- n++
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
- return NewBoltForest(
- WithPath(dir))
+ return NewBoltForest(WithPath(filepath.Join(t.TempDir(), "pilorama")))
}
storagetest.TestAll(t, newPilorama)
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
new file mode 100644
index 0000000000..b035be1e15
--- /dev/null
+++ b/pkg/local_object_storage/pilorama/heap.go
@@ -0,0 +1,96 @@
+package pilorama
+
+import (
+ "container/heap"
+ "slices"
+ "strings"
+)
+
+type heapInfo struct {
+ id MultiNode
+ filename string
+}
+
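+// filenameHeap is a min-heap of (nodes, filename) pairs ordered by filename.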
+type filenameHeap []heapInfo
+
+func (h filenameHeap) Len() int { return len(h) }
+func (h filenameHeap) Less(i, j int) bool { return h[i].filename < h[j].filename }
+func (h filenameHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
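+
+// Push and Pop implement container/heap.Interface and operate on the tail of the slice.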
+func (h *filenameHeap) Push(x any) {
+ *h = append(*h, x.(heapInfo))
+}
+
+func (h *filenameHeap) Pop() any {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
+
+// fixedHeap maintains a fixed number of the smallest elements, optionally starting after a cursor position.
+type fixedHeap struct {
+ start *Cursor
+ sorted bool
+ count int
+ h *filenameHeap
+}
+
+func newHeap(start *Cursor, count int) *fixedHeap {
+ h := new(filenameHeap)
+ heap.Init(h)
+
+ return &fixedHeap{
+ start: start,
+ count: count,
+ h: h,
+ }
+}
+
+const amortizationMultiplier = 5
+
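+// push adds an element unless it precedes the start cursor. To amortize sorting cost,
+// the backing slice may grow to count*amortizationMultiplier entries before it is
+// sorted and truncated back to count.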
+func (h *fixedHeap) push(id MultiNode, filename string) bool {
+ if h.start != nil {
+ if filename < h.start.GetFilename() {
+ return false
+ } else if filename == h.start.GetFilename() {
+ // A tree may have many nodes with the same filename but different versions, so that
+ // len(nodes) > batch_size. Nodes cut off at the batch boundary must be pushed into the
+ // result on a repeated call with the same filename.
+ pos := slices.Index(id, h.start.GetNode())
+ if pos == -1 || pos+1 >= len(id) {
+ return false
+ }
+ id = id[pos+1:]
+ }
+ }
+
+ *h.h = append(*h.h, heapInfo{id: id, filename: filename})
+ h.sorted = false
+
+ if h.h.Len() > h.count*amortizationMultiplier {
+ slices.SortFunc(*h.h, func(a, b heapInfo) int {
+ return strings.Compare(a.filename, b.filename)
+ })
+ *h.h = (*h.h)[:h.count]
+ }
+ return true
+}
+
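+// pop returns the smallest remaining element, first sorting and truncating the
+// backing slice if push left it unsorted.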
+func (h *fixedHeap) pop() (heapInfo, bool) {
+ if !h.sorted {
+ slices.SortFunc(*h.h, func(a, b heapInfo) int {
+ return strings.Compare(a.filename, b.filename)
+ })
+ if len(*h.h) > h.count {
+ *h.h = (*h.h)[:h.count]
+ }
+ h.sorted = true
+ }
+ if len(*h.h) != 0 {
+ info := (*h.h)[0]
+ *h.h = (*h.h)[1:]
+ return info, true
+ }
+ return heapInfo{}, false
+}
diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go
index 92dc9b6aa0..28b7faec8c 100644
--- a/pkg/local_object_storage/pilorama/inmemory.go
+++ b/pkg/local_object_storage/pilorama/inmemory.go
@@ -1,5 +1,10 @@
package pilorama
+import (
+ "cmp"
+ "slices"
+)
+
// nodeInfo couples parent and metadata.
type nodeInfo struct {
Parent Node
@@ -12,50 +17,33 @@ type move struct {
Old nodeInfo
}
-// state represents state being replicated.
-type state struct {
+// memoryTree represents a tree state being replicated.
+type memoryTree struct {
operations []move
tree
}
-// newState constructs new empty tree.
-func newState() *state {
- return &state{
- tree: *newTree(),
+// newMemoryTree constructs a new empty tree.
+func newMemoryTree() *memoryTree {
+ return &memoryTree{
+ tree: tree{
+ infoMap: make(map[Node]nodeInfo),
+ },
}
}
// undo un-does op and changes s in-place.
-func (s *state) undo(op *move) {
- children := s.tree.childMap[op.Parent]
- for i := range children {
- if children[i] == op.Child {
- if len(children) > 1 {
- s.tree.childMap[op.Parent] = append(children[:i], children[i+1:]...)
- } else {
- delete(s.tree.childMap, op.Parent)
- }
- break
- }
- }
-
+func (s *memoryTree) undo(op *move) {
if op.HasOld {
- s.tree.infoMap[op.Child] = op.Old
- oldChildren := s.tree.childMap[op.Old.Parent]
- for i := range oldChildren {
- if oldChildren[i] == op.Child {
- return
- }
- }
- s.tree.childMap[op.Old.Parent] = append(oldChildren, op.Child)
+ s.infoMap[op.Child] = op.Old
} else {
- delete(s.tree.infoMap, op.Child)
+ delete(s.infoMap, op.Child)
}
}
// Apply puts op in log at a proper position, re-applies all subsequent operations
// from log and changes s in-place.
-func (s *state) Apply(op *Move) error {
+func (s *memoryTree) Apply(op *Move) error {
var index int
for index = len(s.operations); index > 0; index-- {
if s.operations[index-1].Time <= op.Time {
@@ -82,17 +70,21 @@ func (s *state) Apply(op *Move) error {
}
// do performs a single move operation on a tree.
-func (s *state) do(op *Move) move {
+func (s *memoryTree) do(op *Move) move {
+ m := op.Meta
+ if m.Items == nil {
+ m.Items = []KeyValue{}
+ }
lm := move{
Move: Move{
Parent: op.Parent,
- Meta: op.Meta,
+ Meta: m,
Child: op.Child,
},
}
- shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
- p, ok := s.tree.infoMap[op.Child]
+ shouldPut := !s.isAncestor(op.Child, op.Parent)
+ p, ok := s.infoMap[op.Child]
if ok {
lm.HasOld = true
lm.Old = p
@@ -104,36 +96,23 @@ func (s *state) do(op *Move) move {
if !ok {
p.Meta.Time = op.Time
- } else {
- s.removeChild(op.Child, p.Parent)
}
- p.Meta = op.Meta
+ p.Meta = m
p.Parent = op.Parent
- s.tree.infoMap[op.Child] = p
- s.tree.childMap[op.Parent] = append(s.tree.childMap[op.Parent], op.Child)
+ s.infoMap[op.Child] = p
return lm
}
-func (s *state) removeChild(child, parent Node) {
- oldChildren := s.tree.childMap[parent]
- for i := range oldChildren {
- if oldChildren[i] == child {
- s.tree.childMap[parent] = append(oldChildren[:i], oldChildren[i+1:]...)
- break
- }
- }
-}
-
-func (s *state) timestamp(pos, size int) Timestamp {
+func (s *memoryTree) timestamp(pos, size int) Timestamp {
if len(s.operations) == 0 {
return nextTimestamp(0, uint64(pos), uint64(size))
}
return nextTimestamp(s.operations[len(s.operations)-1].Time, uint64(pos), uint64(size))
}
-func (s *state) findSpareID() Node {
+func (s *memoryTree) findSpareID() Node {
id := uint64(1)
for _, ok := s.infoMap[id]; ok; _, ok = s.infoMap[id] {
id++
@@ -145,14 +124,22 @@ func (s *state) findSpareID() Node {
type tree struct {
syncHeight uint64
infoMap map[Node]nodeInfo
- childMap map[Node][]Node
}
-func newTree() *tree {
- return &tree{
- childMap: make(map[Node][]Node),
- infoMap: make(map[Node]nodeInfo),
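+// getChildren scans infoMap for nodes whose parent matches and returns them ordered
+// by operation time; this replaces the removed childMap index with an O(n) scan per call.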
+func (t tree) getChildren(parent Node) []Node {
+ var children []Node
+ for c, info := range t.infoMap {
+ if info.Parent == parent {
+ children = append(children, c)
+ }
}
+
+ slices.SortFunc(children, func(ci, cj uint64) int {
+ a := t.infoMap[ci]
+ b := t.infoMap[cj]
+ return cmp.Compare(a.Meta.Time, b.Meta.Time)
+ })
+ return children
}
// isAncestor returns true if parent is an ancestor of a child.
@@ -176,7 +163,7 @@ func (t tree) getPathPrefix(attr string, path []string) (int, Node) {
loop:
for i := range path {
- children := t.childMap[curNode]
+ children := t.getChildren(curNode)
for j := range children {
meta := t.infoMap[children[j]].Meta
f := meta.GetAttr(attr)
@@ -191,9 +178,10 @@ loop:
return len(path), curNode
}
-// get returns list of nodes which have the specified path from root
+// getByPath returns the list of nodes which have the specified path from the root
// descending by values of attr from meta.
-func (t tree) get(attr string, path []string, latest bool) []Node {
+// If latest is true, only the latest node is returned.
+func (t tree) getByPath(attr string, path []string, latest bool) []Node {
if len(path) == 0 {
return nil
}
@@ -204,15 +192,15 @@ func (t tree) get(attr string, path []string, latest bool) []Node {
}
var nodes []Node
- var lastTs Timestamp
+ var lastTS Timestamp
- children := t.childMap[curNode]
+ children := t.getChildren(curNode)
for i := range children {
info := t.infoMap[children[i]]
fileName := string(info.Meta.GetAttr(attr))
if fileName == path[len(path)-1] {
if latest {
- if info.Meta.Time >= lastTs {
+ if info.Meta.Time >= lastTS {
nodes = append(nodes[:0], children[i])
}
} else {
@@ -223,8 +211,3 @@ func (t tree) get(attr string, path []string, latest bool) []Node {
return nodes
}
-
-// getMeta returns meta information of node n.
-func (t tree) getMeta(n Node) Meta {
- return t.infoMap[n].Meta
-}
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index c2143de241..e1f6cd8e7f 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -1,6 +1,8 @@
package pilorama
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -11,53 +13,65 @@ type Forest interface {
// TreeMove moves node in the tree.
// If the parent of the move operation is TrashID, the node is removed.
// If the child of the move operation is RootID, new ID is generated and added to a tree.
- TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error)
+ TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error)
// TreeAddByPath adds new node in the tree using provided path.
// The path is constructed by descending from the root using the values of the attr in meta.
// Internal nodes in path should have exactly one attribute, otherwise a new node is created.
- TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error)
+ TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error)
// TreeApply applies replicated operation from another node.
// If background is true, TreeApply will first check whether an operation exists.
- TreeApply(d CIDDescriptor, treeID string, m *Move, backgroundSync bool) error
+ TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
+ // TreeApplyBatch applies replicated operations from another node.
+ TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
// The last argument determines whether only the node with the latest timestamp is returned.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the path is not in the tree.
- TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error)
+ TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error)
// TreeGetMeta returns meta information of the node with the specified ID.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error)
+ TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error)
// TreeGetChildren returns children of the node with the specified ID. The order is arbitrary.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error)
+ TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
+ // TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.
+ // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
+ TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
- TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error)
+ TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
// TreeDrop drops a tree from the database.
// If the tree is not found, ErrTreeNotFound should be returned.
// In case of empty treeID drops all trees related to container.
- TreeDrop(cid cidSDK.ID, treeID string) error
+ TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error
// TreeList returns all the tree IDs that have been added to the
// passed container ID. A nil slice should be returned if no trees are found.
- TreeList(cid cidSDK.ID) ([]string, error)
+ TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error)
// TreeExists checks if a tree exists locally.
// If the tree is not found, false and a nil error should be returned.
- TreeExists(cid cidSDK.ID, treeID string) (bool, error)
+ TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error)
// TreeUpdateLastSyncHeight updates last log height synchronized with _all_ container nodes.
- TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error
+ TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error
// TreeLastSyncHeight returns last log height synchronized with _all_ container nodes.
- TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error)
+ TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error)
+ // TreeHeight returns current tree height.
+ TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error)
}
type ForestStorage interface {
// DumpInfo returns information about the pilorama.
DumpInfo() Info
- Init() error
- Open(bool) error
- Close() error
- SetMode(m mode.Mode) error
+ Init(context.Context) error
+ Open(context.Context, mode.Mode) error
+ Close(context.Context) error
+ SetMode(context.Context, mode.Mode) error
+ SetParentID(id string)
Forest
+
+ // TreeListTrees returns all pairs "containerID:treeID".
+ TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error)
+ TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error
}
const (
@@ -65,6 +79,38 @@ const (
AttributeVersion = "Version"
)
+// Cursor keeps state between function calls for traversing nodes.
+// It stores the attributes associated with a previous call, allowing subsequent operations
+// to resume traversal from this point rather than starting from the beginning.
+type Cursor struct {
+ // Last traversed filename.
+ filename string
+
+ // Last traversed node.
+ node Node
+}
+
+func NewCursor(filename string, node Node) *Cursor {
+ return &Cursor{
+ filename: filename,
+ node: node,
+ }
+}
+
+func (c *Cursor) GetFilename() string {
+ if c == nil {
+ return ""
+ }
+ return c.filename
+}
+
+func (c *Cursor) GetNode() Node {
+ if c == nil {
+ return Node(0)
+ }
+ return c.node
+}
+
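For orientation, the cursor is meant to be threaded through repeated TreeSortedByFilename calls. A minimal paging sketch, assuming an opened Forest `f`, a container ID `cnr`, and a tree name `treeID` (the `process` consumer is hypothetical):

```go
var last *Cursor // nil means "start from the beginning"
for {
	batch, next, err := f.TreeSortedByFilename(ctx, cnr, treeID, MultiNode{RootID}, last, 100)
	if err != nil {
		return err
	}
	if len(batch) == 0 {
		break // listing exhausted
	}
	for _, n := range batch {
		process(n) // hypothetical consumer of each MultiNodeInfo
	}
	last = next
}
```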
// CIDDescriptor contains container ID and information about the node position
// in the list of container nodes.
type CIDDescriptor struct {
@@ -80,3 +126,68 @@ var ErrInvalidCIDDescriptor = logicerr.New("cid descriptor is invalid")
func (d CIDDescriptor) checkValid() bool {
return 0 <= d.Position && d.Position < d.Size
}
+
+var treeListTreesBatchSizeDefault = 1000
+
+type ContainerIDTreeID struct {
+ CID cidSDK.ID
+ TreeID string
+}
+
+type TreeListTreesPrm struct {
+ NextPageToken []byte
+ // BatchSize is the batch size used to list trees. If it is less than or equal to zero, treeListTreesBatchSizeDefault is used.
+ BatchSize int
+}
+
+type TreeListTreesResult struct {
+ NextPageToken []byte
+ Items []ContainerIDTreeID
+}
+
+type treeList interface {
+ TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error)
+}
+
+func TreeListAll(ctx context.Context, f treeList) ([]ContainerIDTreeID, error) {
+ return treeListAll(ctx, f, treeListTreesBatchSizeDefault)
+}
+
+func treeListAll(ctx context.Context, f treeList, batchSize int) ([]ContainerIDTreeID, error) {
+ var prm TreeListTreesPrm
+ prm.BatchSize = batchSize
+ var result []ContainerIDTreeID
+ first := true
+
+ for len(prm.NextPageToken) > 0 || first {
+ first = false
+
+ res, err := f.TreeListTrees(ctx, prm)
+ if err != nil {
+ return nil, err
+ }
+ prm.NextPageToken = res.NextPageToken
+ result = append(result, res.Items...)
+ }
+
+ return result, nil
+}
+
+func TreeCountAll(ctx context.Context, f treeList) (uint64, error) {
+ var prm TreeListTreesPrm
+ var result uint64
+ first := true
+
+ for len(prm.NextPageToken) > 0 || first {
+ first = false
+
+ res, err := f.TreeListTrees(ctx, prm)
+ if err != nil {
+ return 0, err
+ }
+ prm.NextPageToken = res.NextPageToken
+ result += uint64(len(res.Items))
+ }
+
+ return result, nil
+}
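TreeListAll and TreeCountAll respectively buffer or count every item. A caller that wants streaming consumption can drive the same NextPageToken loop itself; a minimal in-package sketch under the same contract (listTreesPaged and its fn callback are hypothetical):

```go
func listTreesPaged(ctx context.Context, f treeList, fn func(ContainerIDTreeID) error) error {
	var prm TreeListTreesPrm
	prm.BatchSize = 100 // any positive page size; <= 0 falls back to the default
	first := true

	for first || len(prm.NextPageToken) > 0 {
		first = false

		res, err := f.TreeListTrees(ctx, prm)
		if err != nil {
			return err
		}
		prm.NextPageToken = res.NextPageToken
		for _, item := range res.Items {
			if err := fn(item); err != nil {
				return err
			}
		}
	}
	return nil
}
```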
diff --git a/pkg/local_object_storage/pilorama/meta.go b/pkg/local_object_storage/pilorama/meta.go
index 49b7f64774..45e9c2f790 100644
--- a/pkg/local_object_storage/pilorama/meta.go
+++ b/pkg/local_object_storage/pilorama/meta.go
@@ -21,7 +21,11 @@ func (x Meta) Bytes() []byte {
}
func (x Meta) GetAttr(name string) []byte {
- for _, kv := range x.Items {
+ return findAttr(x.Items, name)
+}
+
+func findAttr(ms []KeyValue, name string) []byte {
+ for _, kv := range ms {
if kv.Key == name {
return kv.Value
}
diff --git a/pkg/local_object_storage/pilorama/meta_test.go b/pkg/local_object_storage/pilorama/meta_test.go
index 7adb97abd7..f329f60924 100644
--- a/pkg/local_object_storage/pilorama/meta_test.go
+++ b/pkg/local_object_storage/pilorama/meta_test.go
@@ -1,7 +1,7 @@
package pilorama
import (
- "math/rand"
+ "crypto/rand"
"testing"
"github.com/stretchr/testify/require"
@@ -21,7 +21,8 @@ func TestMeta_Bytes(t *testing.T) {
Items: []KeyValue{
{"abc", []byte{1, 2, 3}},
{AttributeFilename, []byte{}},
- }}
+ },
+ }
data := expected.Bytes()
@@ -35,7 +36,8 @@ func TestMeta_Bytes(t *testing.T) {
Items: []KeyValue{
{"abc", []byte{1, 2, 3}},
{"xyz", []byte{5, 6, 7, 8}},
- }}
+ },
+ }
data := expected.Bytes()
diff --git a/pkg/local_object_storage/pilorama/metrics.go b/pkg/local_object_storage/pilorama/metrics.go
new file mode 100644
index 0000000000..6ffc479e41
--- /dev/null
+++ b/pkg/local_object_storage/pilorama/metrics.go
@@ -0,0 +1,23 @@
+package pilorama
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+type Metrics interface {
+ SetParentID(id string)
+
+ SetMode(m mode.ComponentMode)
+ Close()
+
+ AddMethodDuration(method string, d time.Duration, success bool)
+}
+
+type noopMetrics struct{}
+
+func (m *noopMetrics) SetParentID(string) {}
+func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) Close() {}
+func (m *noopMetrics) AddMethodDuration(string, time.Duration, bool) {}
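Besides the no-op, no concrete Metrics implementation ships in this file. As an illustration only, an in-package implementation that counts failed calls per method could look like the following (countingMetrics is hypothetical and needs the sync import; a real deployment would forward values to Prometheus or a similar backend):

```go
type countingMetrics struct {
	mtx      sync.Mutex
	failures map[string]uint64
}

func (m *countingMetrics) SetParentID(string)         {}
func (m *countingMetrics) SetMode(mode.ComponentMode) {}
func (m *countingMetrics) Close()                     {}

func (m *countingMetrics) AddMethodDuration(method string, _ time.Duration, success bool) {
	if success {
		return
	}
	m.mtx.Lock()
	defer m.mtx.Unlock()
	if m.failures == nil {
		m.failures = make(map[string]uint64)
	}
	m.failures[method]++
}
```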
diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go
new file mode 100644
index 0000000000..0c042aa562
--- /dev/null
+++ b/pkg/local_object_storage/pilorama/mode_test.go
@@ -0,0 +1,31 @@
+package pilorama
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_Mode(t *testing.T) {
+ t.Parallel()
+ f := NewBoltForest(
+ []Option{
+ WithPath(filepath.Join(t.TempDir(), "test.db")),
+ WithMaxBatchSize(1),
+ }...)
+
+ require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly))
+ require.Nil(t, f.(*boltForest).db)
+ require.NoError(t, f.Init(context.Background()))
+ require.Nil(t, f.(*boltForest).db)
+ require.NoError(t, f.Close(context.Background()))
+
+ require.NoError(t, f.Open(context.Background(), mode.Degraded))
+ require.Nil(t, f.(*boltForest).db)
+ require.NoError(t, f.Init(context.Background()))
+ require.Nil(t, f.(*boltForest).db)
+ require.NoError(t, f.Close(context.Background()))
+}
diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go
new file mode 100644
index 0000000000..36d347f100
--- /dev/null
+++ b/pkg/local_object_storage/pilorama/multinode.go
@@ -0,0 +1,53 @@
+package pilorama
+
+import "bytes"
+
+// MultiNode represents a group of internal nodes accessible by the same path, but having different IDs.
+type MultiNode []Node
+
+// MultiNodeInfo represents a group of internal nodes accessible by the same path, but having different IDs.
+type MultiNodeInfo struct {
+ Children MultiNode
+ Parents MultiNode
+ Timestamps []uint64
+ Meta []KeyValue
+}
+
+func (r *MultiNodeInfo) Add(info NodeInfo) bool {
+ if !isInternal(info.Meta.Items) || !isInternal(r.Meta) ||
+ !bytes.Equal(r.Meta[0].Value, info.Meta.Items[0].Value) {
+ return false
+ }
+
+ r.Children = append(r.Children, info.ID)
+ r.Parents = append(r.Parents, info.ParentID)
+ r.Timestamps = append(r.Timestamps, info.Meta.Time)
+ return true
+}
+
+func (r *MultiNodeInfo) LastChild() Node {
+ return r.Children[len(r.Children)-1]
+}
+
+func (n NodeInfo) ToMultiNode() MultiNodeInfo {
+ return MultiNodeInfo{
+ Children: MultiNode{n.ID},
+ Parents: MultiNode{n.ParentID},
+ Timestamps: []uint64{n.Meta.Time},
+ Meta: n.Meta.Items,
+ }
+}
+
+func isInternal(m []KeyValue) bool {
+ return len(m) == 1 && m[0].Key == AttributeFilename
+}
+
+func mergeNodeInfos(ns []NodeInfo) []MultiNodeInfo {
+ var r []MultiNodeInfo
+ for _, info := range ns {
+ if len(r) == 0 || !r[len(r)-1].Add(info) {
+ r = append(r, info.ToMultiNode())
+ }
+ }
+ return r
+}
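To make the merge rule concrete: mergeNodeInfos collapses only adjacent entries whose single filename attribute matches, so callers are expected to pass input sorted by filename. An in-package illustration with made-up IDs:

```go
infos := []NodeInfo{
	{ID: 1, Meta: Meta{Time: 1, Items: []KeyValue{{Key: AttributeFilename, Value: []byte("dir1")}}}},
	{ID: 5, Meta: Meta{Time: 5, Items: []KeyValue{{Key: AttributeFilename, Value: []byte("dir1")}}}},
	{ID: 9, Meta: Meta{Time: 9, Items: []KeyValue{{Key: AttributeFilename, Value: []byte("dir2")}}}},
}
merged := mergeNodeInfos(infos)
// merged[0].Children == MultiNode{1, 5}: the two "dir1" nodes are grouped;
// merged[1].Children == MultiNode{9}: "dir2" stays on its own.
```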
diff --git a/pkg/local_object_storage/pilorama/option.go b/pkg/local_object_storage/pilorama/option.go
index ccee0170f2..d576d427f7 100644
--- a/pkg/local_object_storage/pilorama/option.go
+++ b/pkg/local_object_storage/pilorama/option.go
@@ -2,6 +2,7 @@ package pilorama
import (
"io/fs"
+ "os"
"time"
)
@@ -13,6 +14,8 @@ type cfg struct {
noSync bool
maxBatchDelay time.Duration
maxBatchSize int
+ openFile func(string, int, fs.FileMode) (*os.File, error)
+ metrics Metrics
}
func WithPath(path string) Option {
@@ -44,3 +47,15 @@ func WithMaxBatchSize(size int) Option {
c.maxBatchSize = size
}
}
+
+func WithOpenFile(openFile func(string, int, fs.FileMode) (*os.File, error)) Option {
+ return func(c *cfg) {
+ c.openFile = openFile
+ }
+}
+
+func WithMetrics(m Metrics) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
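Putting the options together, a construction sketch (in-package for brevity; ctx, dir, and the shard/mode import are assumed, and os.OpenFile stands in for a custom opener that tests could use to inject failures):

```go
f := NewBoltForest(
	WithPath(filepath.Join(dir, "pilorama.db")),
	WithMaxBatchSize(100),
	WithOpenFile(os.OpenFile),   // os.FileMode aliases fs.FileMode, so this matches
	WithMetrics(&noopMetrics{}), // replace with a real Metrics implementation
)
if err := f.Open(ctx, mode.ReadWrite); err != nil {
	return err
}
defer f.Close(ctx)
if err := f.Init(ctx); err != nil {
	return err
}
```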
diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go
new file mode 100644
index 0000000000..eecee1527b
--- /dev/null
+++ b/pkg/local_object_storage/pilorama/split_test.go
@@ -0,0 +1,155 @@
+package pilorama
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDuplicateDirectory(t *testing.T) {
+ for i := range providers {
+ if providers[i].name == "inmemory" {
+ continue
+ }
+ t.Run(providers[i].name, func(t *testing.T) {
+ testDuplicateDirectory(t, providers[i].construct(t))
+ })
+ }
+}
+
+func testDuplicateDirectory(t *testing.T, f Forest) {
+ ctx := context.Background()
+ d := CIDDescriptor{CID: cidtest.ID(), Size: 1}
+ treeID := "sometree"
+
+ treeApply := func(t *testing.T, parent, child uint64, filename string, internal bool) {
+ // Nothing magic here: we add items in order and children are unique.
+ // This simplifies the function interface a bit.
+ ts := child
+
+ kv := []KeyValue{{Key: AttributeFilename, Value: []byte(filename)}}
+ if !internal {
+ kv = append(kv, KeyValue{Key: "uniqueAttr", Value: []byte{byte(child)}})
+ }
+
+ err := f.TreeApply(ctx, d.CID, treeID, &Move{
+ Parent: parent,
+ Child: child,
+ Meta: Meta{
+ Time: ts,
+ Items: kv,
+ },
+ }, true)
+ require.NoError(t, err)
+ }
+
+ // The following tree is constructed:
+ // 0
+ // [1] |-- dir1 (internal)
+ // [2] |-- value1
+ // [3] |-- dir3 (internal)
+ // [4] |-- value3
+ // [5] |-- dir1 (internal)
+ // [6] |-- value2
+ // [7] |-- dir3 (internal)
+ // [8] |-- value4
+ // [9] |-- dir2 (internal)
+ // [10] |-- value0
+ treeApply(t, RootID, 1, "dir1", true)
+ treeApply(t, 1, 2, "value1", false)
+ treeApply(t, 1, 3, "dir3", true)
+ treeApply(t, 3, 4, "value3", false)
+ treeApply(t, RootID, 5, "dir1", true)
+ treeApply(t, 5, 6, "value2", false)
+ treeApply(t, 5, 7, "dir3", true)
+ treeApply(t, 7, 8, "value4", false)
+ treeApply(t, RootID, 9, "dir2", true)
+ treeApply(t, RootID, 10, "value0", false)
+
+ // The compacted view:
+ // 0
+ // [1,5] |-- dir1 (internal)
+ // [2] |-- value1
+ // [3,7] |-- dir3 (internal)
+ // [4] |-- value3
+ // [8] |-- value4
+ // [6] |-- value2
+ // [9] |-- dir2 (internal)
+ // [10] |-- value0
+ testGetByPath := func(t *testing.T, p string) []byte {
+ pp := strings.Split(p, "/")
+ nodes, err := f.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, pp, false)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(nodes))
+
+ meta, _, err := f.TreeGetMeta(ctx, d.CID, treeID, nodes[0])
+ require.NoError(t, err)
+ require.Equal(t, []byte(pp[len(pp)-1]), meta.GetAttr(AttributeFilename))
+ return meta.GetAttr("uniqueAttr")
+ }
+
+ require.Equal(t, []byte{2}, testGetByPath(t, "dir1/value1"))
+ require.Equal(t, []byte{4}, testGetByPath(t, "dir1/dir3/value3"))
+ require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4"))
+ require.Equal(t, []byte{10}, testGetByPath(t, "value0"))
+
+ testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) {
+ res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize)
+ require.NoError(t, err)
+ return res, last
+ }
+
+ t.Run("test sorted listing, full children branch", func(t *testing.T) {
+ t.Run("big batch size", func(t *testing.T) {
+ res, _ := testSortedByFilename(t, MultiNode{RootID}, nil, 10)
+ require.Equal(t, 3, len(res))
+ require.Equal(t, MultiNode{1, 5}, res[0].Children)
+ require.Equal(t, MultiNode{9}, res[1].Children)
+ require.Equal(t, MultiNode{10}, res[2].Children)
+
+ t.Run("multi-root", func(t *testing.T) {
+ res, _ := testSortedByFilename(t, MultiNode{1, 5}, nil, 10)
+ require.Equal(t, 3, len(res))
+ require.Equal(t, MultiNode{3, 7}, res[0].Children)
+ require.Equal(t, MultiNode{2}, res[1].Children)
+ require.Equal(t, MultiNode{6}, res[2].Children)
+ })
+ })
+ t.Run("small batch size", func(t *testing.T) {
+ res, last := testSortedByFilename(t, MultiNode{RootID}, nil, 1)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, MultiNode{1, 5}, res[0].Children)
+
+ res, last = testSortedByFilename(t, MultiNode{RootID}, last, 1)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, MultiNode{9}, res[0].Children)
+
+ res, last = testSortedByFilename(t, MultiNode{RootID}, last, 1)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, MultiNode{10}, res[0].Children)
+
+ res, _ = testSortedByFilename(t, MultiNode{RootID}, last, 1)
+ require.Equal(t, 0, len(res))
+
+ t.Run("multi-root", func(t *testing.T) {
+ res, last := testSortedByFilename(t, MultiNode{1, 5}, nil, 1)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, MultiNode{3, 7}, res[0].Children)
+
+ res, last = testSortedByFilename(t, MultiNode{1, 5}, last, 1)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, MultiNode{2}, res[0].Children)
+
+ res, last = testSortedByFilename(t, MultiNode{1, 5}, last, 1)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, MultiNode{6}, res[0].Children)
+
+ res, _ = testSortedByFilename(t, MultiNode{1, 5}, last, 1)
+ require.Equal(t, 0, len(res))
+ })
+ })
+ })
+}
diff --git a/pkg/local_object_storage/pilorama/types.go b/pkg/local_object_storage/pilorama/types.go
index 99918683de..8d86163649 100644
--- a/pkg/local_object_storage/pilorama/types.go
+++ b/pkg/local_object_storage/pilorama/types.go
@@ -55,3 +55,9 @@ var (
func isAttributeInternal(key string) bool {
return key == AttributeFilename
}
+
+type NodeInfo struct {
+ ID Node
+ Meta Meta
+ ParentID Node
+}
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index 24090e8d8c..b4015ae8d2 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -1,9 +1,13 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type ContainerSizePrm struct {
@@ -22,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 {
return r.size
}
-func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
+func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
s.m.RLock()
defer s.m.RUnlock()
@@ -30,12 +34,117 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
return ContainerSizeRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ContainerSizeRes{}, err
+ }
+ defer release()
+
size, err := s.metaBase.ContainerSize(prm.cnr)
if err != nil {
- return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
+ return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
}
return ContainerSizeRes{
size: size,
}, nil
}
+
+type ContainerCountPrm struct {
+ ContainerID cid.ID
+}
+
+type ContainerCountRes struct {
+ Phy uint64
+ Logic uint64
+ User uint64
+}
+
+func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (ContainerCountRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ContainerCount",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Stringer("container_id", prm.ContainerID),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ContainerCountRes{}, ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ContainerCountRes{}, err
+ }
+ defer release()
+
+ counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
+ if err != nil {
+ return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
+ }
+
+ return ContainerCountRes{
+ Phy: counters.Phy,
+ Logic: counters.Logic,
+ User: counters.User,
+ }, nil
+}
+
+func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.DeleteContainerSize",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Stringer("container_id", id),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ return s.metaBase.DeleteContainerSize(ctx, id)
+}
+
+func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.DeleteContainerCount",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Stringer("container_id", id),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ return s.metaBase.DeleteContainerCount(ctx, id)
+}
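A hedged usage sketch for the new counters API (sh is an opened *Shard and cnr is a cid.ID, both assumed to exist in the caller):

```go
res, err := sh.ContainerCount(ctx, shard.ContainerCountPrm{ContainerID: cnr})
if err != nil {
	return err // e.g. ErrDegradedMode when the metabase is unavailable
}
fmt.Printf("phy=%d logic=%d user=%d\n", res.Phy, res.Logic, res.User)
```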
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 6d17114bf0..a607f70f7d 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -1,48 +1,62 @@
package shard
import (
+ "context"
"errors"
"fmt"
+ "slices"
+ "sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
)
-func (s *Shard) handleMetabaseFailure(stage string, err error) error {
- s.log.Error("metabase failure, switching mode",
+func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error {
+ s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
- err = s.SetMode(mode.ReadOnly)
+ err = s.SetMode(ctx, mode.ReadOnly)
if err == nil {
return nil
}
- s.log.Error("can't move shard to readonly, switch mode",
+ s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
- err = s.SetMode(mode.DegradedReadOnly)
+ err = s.SetMode(ctx, mode.DegradedReadOnly)
if err != nil {
- return fmt.Errorf("could not switch to mode %s", mode.DegradedReadOnly)
+ return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly)
}
return nil
}
// Open opens all Shard's components.
-func (s *Shard) Open() error {
- components := []interface{ Open(bool) error }{
- s.blobStor, s.metaBase,
+func (s *Shard) Open(ctx context.Context) error {
+ components := []interface {
+ Open(context.Context, mode.Mode) error
+ }{
+ s.blobStor,
+ }
+ m := s.GetMode()
+
+ if !m.NoMetabase() {
+ components = append(components, s.metaBase)
}
- if s.hasWriteCache() {
+ if s.hasWriteCache() && !m.NoMetabase() {
components = append(components, s.writeCache)
}
@@ -51,17 +65,17 @@ func (s *Shard) Open() error {
}
for i, component := range components {
- if err := component.Open(false); err != nil {
+ if err := component.Open(ctx, m); err != nil {
if component == s.metaBase {
// We must first open all other components to avoid
// opening a non-existent DB in read-only mode.
for j := i + 1; j < len(components); j++ {
- if err := components[j].Open(false); err != nil {
+ if err := components[j].Open(ctx, m); err != nil {
// Other components must be opened, fail.
- return fmt.Errorf("could not open %T: %w", components[j], err)
+ return fmt.Errorf("open %T: %w", components[j], err)
}
}
- err = s.handleMetabaseFailure("open", err)
+ err = s.handleMetabaseFailure(ctx, "open", err)
if err != nil {
return err
}
@@ -69,7 +83,7 @@ func (s *Shard) Open() error {
break
}
- return fmt.Errorf("could not open %T: %w", component, err)
+ return fmt.Errorf("open %T: %w", component, err)
}
}
return nil
@@ -77,22 +91,62 @@ func (s *Shard) Open() error {
type metabaseSynchronizer Shard
-func (x *metabaseSynchronizer) Init() error {
- return (*Shard)(x).refillMetabase()
+func (x *metabaseSynchronizer) Init(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init")
+ defer span.End()
+
+ return (*Shard)(x).refillMetabase(ctx)
}
// Init initializes all Shard's components.
-func (s *Shard) Init() error {
+func (s *Shard) Init(ctx context.Context) error {
+ m := s.GetMode()
+ if err := s.initializeComponents(ctx, m); err != nil {
+ return err
+ }
+
+ s.updateMetrics(ctx)
+
+ s.gc = &gc{
+ gcCfg: &s.gcCfg,
+ remover: s.removeGarbage,
+ stopChannel: make(chan struct{}),
+ newEpochChan: make(chan uint64),
+ newEpochHandlers: &newEpochHandlers{
+ cancelFunc: func() {},
+ handlers: []newEpochHandler{
+ s.collectExpiredLocks,
+ s.collectExpiredObjects,
+ s.collectExpiredTombstones,
+ s.collectExpiredMetrics,
+ },
+ },
+ }
+ if s.gc.metrics != nil {
+ s.gc.metrics.SetShardID(s.info.ID.String())
+ }
+
+ s.gc.init(ctx)
+
+ s.rb = newRebuilder()
+ if !m.NoMetabase() {
+ s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
+ }
+ s.writecacheSealCancel.Store(dummyCancel)
+ return nil
+}
+
+func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
type initializer interface {
- Init() error
+ Init(context.Context) error
}
var components []initializer
- if !s.GetMode().NoMetabase() {
+ if !m.NoMetabase() {
var initMetabase initializer
- if s.needRefillMetabase() {
+ if s.NeedRefillMetabase() {
initMetabase = (*metabaseSynchronizer)(s)
} else {
initMetabase = s.metaBase
@@ -105,7 +159,7 @@ func (s *Shard) Init() error {
components = []initializer{s.blobStor}
}
- if s.hasWriteCache() {
+ if s.hasWriteCache() && !m.NoMetabase() {
components = append(components, s.writeCache)
}
@@ -114,13 +168,13 @@ func (s *Shard) Init() error {
}
for _, component := range components {
- if err := component.Init(); err != nil {
+ if err := component.Init(ctx); err != nil {
if component == s.metaBase {
- if errors.Is(err, meta.ErrOutdatedVersion) {
+ if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
}
- err = s.handleMetabaseFailure("init", err)
+ err = s.handleMetabaseFailure(ctx, "init", err)
if err != nil {
return err
}
@@ -128,128 +182,200 @@ func (s *Shard) Init() error {
break
}
- return fmt.Errorf("could not initialize %T: %w", component, err)
+ return fmt.Errorf("initialize %T: %w", component, err)
}
}
-
- s.updateMetrics()
-
- s.gc = &gc{
- gcCfg: &s.gcCfg,
- remover: s.removeGarbage,
- stopChannel: make(chan struct{}),
- eventChan: make(chan Event),
- mEventHandler: map[eventType]*eventHandlers{
- eventNewEpoch: {
- cancelFunc: func() {},
- handlers: []eventHandler{
- s.collectExpiredObjects,
- s.collectExpiredTombstones,
- s.collectExpiredLocks,
- },
- },
- },
- }
-
- s.gc.init()
-
return nil
}
-func (s *Shard) refillMetabase() error {
+func (s *Shard) refillMetabase(ctx context.Context) error {
+ path := s.metaBase.DumpInfo().Path
+ s.metricsWriter.SetRefillStatus(path, "running")
+ s.metricsWriter.SetRefillPercent(path, 0)
+ var success bool
+ defer func() {
+ if success {
+ s.metricsWriter.SetRefillStatus(path, "completed")
+ } else {
+ s.metricsWriter.SetRefillStatus(path, "failed")
+ }
+ }()
+
err := s.metaBase.Reset()
if err != nil {
- return fmt.Errorf("could not reset metabase: %w", err)
+ return fmt.Errorf("reset metabase: %w", err)
}
- obj := objectSDK.New()
+ withCount := true
+ totalObjects, err := s.blobStor.ObjectsCount(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
+ withCount = false
+ }
- err = blobstor.IterateBinaryObjects(s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
- if err := obj.Unmarshal(data); err != nil {
- s.log.Warn("could not unmarshal object",
- zap.Stringer("address", addr),
- zap.String("err", err.Error()))
+ eg, egCtx := errgroup.WithContext(ctx)
+ if s.refillMetabaseWorkersCount > 0 {
+ eg.SetLimit(s.refillMetabaseWorkersCount)
+ }
+
+ var completedCount uint64
+ var metricGuard sync.Mutex
+ itErr := blobstor.IterateBinaryObjects(egCtx, s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
+ eg.Go(func() error {
+ var success bool
+ defer func() {
+ s.metricsWriter.IncRefillObjectsCount(path, len(data), success)
+ if withCount {
+ metricGuard.Lock()
+ completedCount++
+ s.metricsWriter.SetRefillPercent(path, uint32(completedCount*100/totalObjects))
+ metricGuard.Unlock()
+ }
+ }()
+
+ if err := s.refillObject(egCtx, data, addr, descriptor); err != nil {
+ return err
+ }
+ success = true
+ return nil
+ })
+
+ select {
+ case <-egCtx.Done():
+ return egCtx.Err()
+ default:
return nil
}
-
- // nolint: exhaustive
- switch obj.Type() {
- case objectSDK.TypeTombstone:
- tombstone := objectSDK.NewTombstone()
-
- if err := tombstone.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("could not unmarshal tombstone content: %w", err)
- }
-
- tombAddr := object.AddressOf(obj)
- memberIDs := tombstone.Members()
- tombMembers := make([]oid.Address, 0, len(memberIDs))
-
- for i := range memberIDs {
- a := tombAddr
- a.SetObject(memberIDs[i])
-
- tombMembers = append(tombMembers, a)
- }
-
- var inhumePrm meta.InhumePrm
-
- inhumePrm.SetTombstoneAddress(tombAddr)
- inhumePrm.SetAddresses(tombMembers...)
-
- _, err = s.metaBase.Inhume(inhumePrm)
- if err != nil {
- return fmt.Errorf("could not inhume objects: %w", err)
- }
- case objectSDK.TypeLock:
- var lock objectSDK.Lock
- if err := lock.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("could not unmarshal lock content: %w", err)
- }
-
- locked := make([]oid.ID, lock.NumberOfMembers())
- lock.ReadMembers(locked)
-
- cnr, _ := obj.ContainerID()
- id, _ := obj.ID()
- err = s.metaBase.Lock(cnr, id, locked)
- if err != nil {
- return fmt.Errorf("could not lock objects: %w", err)
- }
- }
-
- var mPrm meta.PutPrm
- mPrm.SetObject(obj)
- mPrm.SetStorageID(descriptor)
-
- _, err := s.metaBase.Put(mPrm)
- if err != nil && !meta.IsErrRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
- return err
- }
-
- return nil
})
+
+ egErr := eg.Wait()
+
+ err = errors.Join(egErr, itErr)
if err != nil {
- return fmt.Errorf("could not put objects to the meta: %w", err)
+ return fmt.Errorf("put objects to the meta: %w", err)
}
err = s.metaBase.SyncCounters()
if err != nil {
- return fmt.Errorf("could not sync object counters: %w", err)
+ return fmt.Errorf("sync object counters: %w", err)
}
+ success = true
+ s.metricsWriter.SetRefillPercent(path, 100)
+ return nil
+}
+
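refillMetabase above fans object processing out through an errgroup, bounding concurrency only when a positive worker count is configured. Reduced to its essentials, the submission pattern looks roughly like this (a sketch with hypothetical items and process; per-iteration loop variables assume Go 1.22+):

```go
eg, egCtx := errgroup.WithContext(ctx)
if workers > 0 {
	eg.SetLimit(workers) // an unset limit means an unbounded group, as in refillMetabase
}

submit:
for _, item := range items {
	select {
	case <-egCtx.Done():
		break submit // a worker already failed; stop submitting
	default:
	}
	eg.Go(func() error {
		return process(egCtx, item)
	})
}
return eg.Wait() // first worker error, if any
```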
+func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
+ obj := objectSDK.New()
+ if err := obj.Unmarshal(data); err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
+ zap.Stringer("address", addr),
+ zap.Error(err))
+ return nil
+ }
+
+ hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) >= 0
+
+ var isIndexedContainer bool
+ if hasIndexedAttribute {
+ info, err := s.containerInfo.Info(ctx, addr.Container())
+ if err != nil {
+ return err
+ }
+ if info.Removed {
+ s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
+ return nil
+ }
+ isIndexedContainer = info.Indexed
+ }
+
+ var err error
+ switch obj.Type() {
+ case objectSDK.TypeTombstone:
+ err = s.refillTombstoneObject(ctx, obj)
+ case objectSDK.TypeLock:
+ err = s.refillLockObject(ctx, obj)
+ default:
+ }
+ if err != nil {
+ return err
+ }
+
+ var mPrm meta.PutPrm
+ mPrm.SetObject(obj)
+ mPrm.SetStorageID(descriptor)
+ mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer)
+
+ _, err = s.metaBase.Put(ctx, mPrm)
+ if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
+ return err
+ }
+ return nil
+}
+
+func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
+ var lock objectSDK.Lock
+ if err := lock.Unmarshal(obj.Payload()); err != nil {
+ return fmt.Errorf("unmarshal lock content: %w", err)
+ }
+
+ locked := make([]oid.ID, lock.NumberOfMembers())
+ lock.ReadMembers(locked)
+
+ cnr, _ := obj.ContainerID()
+ id, _ := obj.ID()
+ err := s.metaBase.Lock(ctx, cnr, id, locked)
+ if err != nil {
+ return fmt.Errorf("lock objects: %w", err)
+ }
+ return nil
+}
+
+func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object) error {
+ tombstone := objectSDK.NewTombstone()
+
+ if err := tombstone.Unmarshal(obj.Payload()); err != nil {
+ return fmt.Errorf("unmarshal tombstone content: %w", err)
+ }
+
+ tombAddr := object.AddressOf(obj)
+ memberIDs := tombstone.Members()
+ tombMembers := make([]oid.Address, 0, len(memberIDs))
+
+ for i := range memberIDs {
+ a := tombAddr
+ a.SetObject(memberIDs[i])
+
+ tombMembers = append(tombMembers, a)
+ }
+
+ var inhumePrm meta.InhumePrm
+
+ inhumePrm.SetTombstoneAddress(tombAddr)
+ inhumePrm.SetAddresses(tombMembers...)
+
+ _, err := s.metaBase.Inhume(ctx, inhumePrm)
+ if err != nil {
+ return fmt.Errorf("inhume objects: %w", err)
+ }
return nil
}
// Close releases all Shard's components.
-func (s *Shard) Close() error {
- components := []interface{ Close() error }{}
+func (s *Shard) Close(ctx context.Context) error {
+ unlock := s.lockExclusive()
+ if s.rb != nil {
+ s.rb.Stop(ctx, s.log)
+ }
+ var components []interface{ Close(context.Context) error }
if s.pilorama != nil {
components = append(components, s.pilorama)
}
if s.hasWriteCache() {
+ prev := s.writecacheSealCancel.Swap(notInitializedCancel)
+ prev.cancel() // no need to wait: writecache.Seal and writecache.Close lock the same mutex
components = append(components, s.writeCache)
}
@@ -257,15 +383,23 @@ func (s *Shard) Close() error {
var lastErr error
for _, component := range components {
- if err := component.Close(); err != nil {
+ if err := component.Close(ctx); err != nil {
lastErr = err
- s.log.Error("could not close shard component", zap.Error(err))
+ s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
+ if s.opsLimiter != nil {
+ s.opsLimiter.Close()
+ }
+
+ unlock()
+
+ // GC waits for handlers and the remover to complete, and handlers may try to take the shard's lock.
+ // To prevent a deadlock, GC is stopped outside of the exclusive lock.
// If Init/Open was unsuccessful gc can be nil.
if s.gc != nil {
- s.gc.stop()
+ s.gc.stop(ctx)
}
return lastErr
@@ -274,21 +408,31 @@ func (s *Shard) Close() error {
// Reload reloads configuration portions that are necessary.
// If a config option is invalid, it logs an error and returns nil.
// If there was a problem with applying new configuration, an error is returned.
-func (s *Shard) Reload(opts ...Option) error {
+func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Reload")
+ defer span.End()
+
// Do not use defaultCfg here: missing options need not be reloaded.
var c cfg
for i := range opts {
opts[i](&c)
}
- s.m.Lock()
- defer s.m.Unlock()
+ unlock := s.lockExclusive()
+ defer unlock()
- ok, err := s.metaBase.Reload(c.metaOpts...)
+ s.rb.Stop(ctx, s.log)
+ if !s.info.Mode.NoMetabase() {
+ defer func() {
+ s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
+ }()
+ }
+
+ ok, err := s.metaBase.Reload(ctx, c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
- s.log.Error("can't open metabase, move to a degraded mode", zap.Error(err))
- _ = s.setMode(mode.DegradedReadOnly)
+ s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
+ _ = s.setMode(ctx, mode.DegradedReadOnly)
}
return err
}
@@ -298,17 +442,43 @@ func (s *Shard) Reload(opts ...Option) error {
// Here we refill metabase only if a new instance was opened. This is a feature,
// we don't want to hang for some time just because we forgot to change
// config after the node was updated.
- err = s.refillMetabase()
+ err = s.refillMetabase(ctx)
} else {
- err = s.metaBase.Init()
+ err = s.metaBase.Init(ctx)
}
if err != nil {
- s.log.Error("can't initialize metabase, move to a degraded-read-only mode", zap.Error(err))
- _ = s.setMode(mode.DegradedReadOnly)
+ s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
+ _ = s.setMode(ctx, mode.DegradedReadOnly)
return err
}
}
+ if err := s.setMode(ctx, c.info.Mode); err != nil {
+ return err
+ }
+ s.reloadOpsLimiter(&c)
- s.log.Info("trying to restore read-write mode")
- return s.setMode(mode.ReadWrite)
+ return nil
+}
+
+func (s *Shard) reloadOpsLimiter(c *cfg) {
+ if c.configOpsLimiter != nil {
+ old := s.opsLimiter.ptr.Swap(&qosLimiterHolder{Limiter: c.configOpsLimiter})
+ old.Close()
+ s.opsLimiter.SetParentID(s.info.ID.String())
+ }
+}
+
+func (s *Shard) lockExclusive() func() {
+ s.setModeRequested.Store(true)
+ val := s.gcCancel.Load()
+ if val != nil {
+ cancelGC := val.(context.CancelFunc)
+ cancelGC()
+ }
+ if c := s.writecacheSealCancel.Load(); c != nil {
+ c.cancel()
+ }
+ s.m.Lock()
+ s.setModeRequested.Store(false)
+ return s.m.Unlock
}
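lockExclusive encodes a cancel-before-lock pattern: background work that may itself acquire s.m (GC, writecache sealing) is cancelled first, so the subsequent s.m.Lock() cannot deadlock against it, while setModeRequested lets in-flight operations notice that an exclusive lock is pending. A generic distillation (names are illustrative, not the shard API):

```go
type exclusive struct {
	mu       sync.RWMutex
	bgCancel atomic.Value // holds a context.CancelFunc for background work
}

func (e *exclusive) lockExclusive() (unlock func()) {
	if v := e.bgCancel.Load(); v != nil {
		v.(context.CancelFunc)() // background goroutine stops competing for mu
	}
	e.mu.Lock()
	return e.mu.Unlock
}
```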
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 1c258cb8c4..6d2cd71370 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -1,19 +1,25 @@
package shard
import (
+ "context"
+ "fmt"
+ "io/fs"
+ "math"
"os"
"path/filepath"
+ "sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -22,70 +28,90 @@ import (
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
+ "go.etcd.io/bbolt"
)
-type epochState struct{}
-
-func (s epochState) CurrentEpoch() uint64 {
- return 0
-}
-
type objAddr struct {
obj *objectSDK.Object
addr oid.Address
}
func TestShardOpen(t *testing.T) {
+ t.Parallel()
+
dir := t.TempDir()
metaPath := filepath.Join(dir, "meta")
+ st := teststore.New(teststore.WithSubstorage(fstree.New(
+ fstree.WithDirNameLen(2),
+ fstree.WithPath(filepath.Join(dir, "blob")),
+ fstree.WithDepth(1)),
+ ))
+
+ var allowedMode atomic.Int64
+ openFileMetabase := func(p string, f int, perm fs.FileMode) (*os.File, error) {
+ const modeMask = os.O_RDONLY | os.O_RDWR | os.O_WRONLY
+ if int64(f&modeMask) == allowedMode.Load() {
+ return os.OpenFile(p, f, perm)
+ }
+ return nil, fs.ErrPermission
+ }
+
+ wcOpts := []writecache.Option{
+ writecache.WithPath(filepath.Join(dir, "wc")),
+ }
+
newShard := func() *Shard {
return New(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithID(NewIDFromBytes([]byte{})),
+ WithLogger(test.NewLogger(t)),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
- {
- Storage: fstree.New(
- fstree.WithDirNameLen(2),
- fstree.WithPath(filepath.Join(dir, "blob")),
- fstree.WithDepth(1)),
- },
+ {Storage: st},
})),
- WithMetaBaseOptions(meta.WithPath(metaPath), meta.WithEpochState(epochState{})),
+ WithMetaBaseOptions(
+ meta.WithPath(metaPath),
+ meta.WithEpochState(epochState{}),
+ meta.WithBoltDBOptions(&bbolt.Options{OpenFile: openFileMetabase}),
+ ),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(dir, "pilorama"))),
WithWriteCache(true),
- WithWriteCacheOptions(
- writecache.WithPath(filepath.Join(dir, "wc"))))
+ WithWriteCacheOptions(wcOpts))
}
+ allowedMode.Store(int64(os.O_RDWR))
+
sh := newShard()
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadWrite, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
// Metabase can be opened in read-only => start in ReadOnly mode.
- require.NoError(t, os.Chmod(metaPath, 0444))
+ allowedMode.Store(int64(os.O_RDONLY))
+
sh = newShard()
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.Error(t, sh.SetMode(mode.ReadWrite))
+ require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
// Metabase is corrupted => start in DegradedReadOnly mode.
- require.NoError(t, os.Chmod(metaPath, 0000))
+ allowedMode.Store(math.MaxInt64)
+
sh = newShard()
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.DegradedReadOnly, sh.GetMode())
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
}
func TestRefillMetabaseCorrupted(t *testing.T) {
+ t.Parallel()
+
dir := t.TempDir()
fsTree := fstree.New(
@@ -100,12 +126,17 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}),
}
+ mm := newMetricStore()
+
sh := New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
- WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})))
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})),
+ WithMetricsWriter(mm),
+ )
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
obj := objecttest.Object()
obj.SetType(objectSDK.TypeRegular)
@@ -113,30 +144,38 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
var putPrm PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- require.NoError(t, sh.Close())
+ require.NoError(t, sh.Close(context.Background()))
addr := object.AddressOf(obj)
- _, err = fsTree.Put(common.PutPrm{Address: addr, RawData: []byte("not an object")})
- require.NoError(t, err)
+ // This is copied from `fstree.treePath()` to avoid exporting the function just for tests.
+ {
+ saddr := addr.Object().EncodeToString() + "." + addr.Container().EncodeToString()
+ p := fmt.Sprintf("%s/%s/%s", fsTree.RootPath, saddr[:2], saddr[2:])
+ require.NoError(t, os.WriteFile(p, []byte("not an object"), fsTree.Permissions))
+ }
sh = New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta_new")), meta.WithEpochState(epochState{})),
- WithRefillMetabase(true))
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ WithRefillMetabase(true),
+ WithMetricsWriter(mm))
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
var getPrm GetPrm
getPrm.SetAddress(addr)
- _, err = sh.Get(getPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
- require.NoError(t, sh.Close())
+ _, err = sh.Get(context.Background(), getPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
+ require.NoError(t, sh.Close(context.Background()))
}
func TestRefillMetabase(t *testing.T) {
+ t.Parallel()
+
p := t.Name()
defer os.RemoveAll(p)
@@ -151,7 +190,10 @@ func TestRefillMetabase(t *testing.T) {
}),
}
+ mm := newMetricStore()
+
sh := New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(
meta.WithPath(filepath.Join(p, "meta")),
@@ -159,13 +201,14 @@ func TestRefillMetabase(t *testing.T) {
),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama"))),
+ WithMetricsWriter(mm),
)
// open Blobstor
- require.NoError(t, sh.Open())
+ require.NoError(t, sh.Open(context.Background()))
// initialize Blobstor
- require.NoError(t, sh.Init())
+ require.NoError(t, sh.Init(context.Background()))
const objNum = 5
@@ -173,7 +216,7 @@ func TestRefillMetabase(t *testing.T) {
locked := make([]oid.ID, 1, 2)
locked[0] = oidtest.ID()
cnrLocked := cidtest.ID()
- for i := uint64(0); i < objNum; i++ {
+ for range objNum {
obj := objecttest.Object()
obj.SetType(objectSDK.TypeRegular)
@@ -218,13 +261,13 @@ func TestRefillMetabase(t *testing.T) {
for _, v := range mObjs {
putPrm.SetObject(v.obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
putPrm.SetObject(tombObj)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// LOCK object handling
@@ -236,16 +279,16 @@ func TestRefillMetabase(t *testing.T) {
objectSDK.WriteLock(lockObj, lock)
putPrm.SetObject(lockObj)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
lockID, _ := lockObj.ID()
- require.NoError(t, sh.Lock(cnrLocked, lockID, locked))
+ require.NoError(t, sh.Lock(context.Background(), cnrLocked, lockID, locked))
var inhumePrm InhumePrm
inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...)
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var headPrm HeadPrm
@@ -253,10 +296,10 @@ func TestRefillMetabase(t *testing.T) {
checkObj := func(addr oid.Address, expObj *objectSDK.Object) {
headPrm.SetAddress(addr)
- res, err := sh.Head(headPrm)
+ res, err := sh.Head(context.Background(), headPrm)
if expObj == nil {
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, client.IsErrObjectNotFound(err))
return
}
@@ -278,12 +321,12 @@ func TestRefillMetabase(t *testing.T) {
for _, member := range tombMembers {
headPrm.SetAddress(member)
- _, err := sh.Head(headPrm)
+ _, err := sh.Head(context.Background(), headPrm)
if exists {
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
} else {
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, client.IsErrObjectNotFound(err))
}
}
}
@@ -298,9 +341,9 @@ func TestRefillMetabase(t *testing.T) {
var prm InhumePrm
prm.MarkAsGarbage(addr)
- _, err := sh.Inhume(prm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked),
- "object %s should be locked", locked[i])
+ var target *apistatus.ObjectLocked
+ _, err := sh.Inhume(context.Background(), prm)
+ require.ErrorAs(t, err, &target, "object %s should be locked", locked[i])
}
}
@@ -312,13 +355,14 @@ func TestRefillMetabase(t *testing.T) {
c, err := sh.metaBase.ObjectCounters()
require.NoError(t, err)
- phyBefore := c.Phy()
- logicalBefore := c.Logic()
+ phyBefore := c.Phy
+ logicalBefore := c.Logic
- err = sh.Close()
+ err = sh.Close(context.Background())
require.NoError(t, err)
sh = New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(
meta.WithPath(filepath.Join(p, "meta_restored")),
@@ -326,31 +370,35 @@ func TestRefillMetabase(t *testing.T) {
),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama_another"))),
+ WithMetricsWriter(mm),
)
// open Blobstor
- require.NoError(t, sh.Open())
+ require.NoError(t, sh.Open(context.Background()))
// initialize Blobstor
- require.NoError(t, sh.Init())
+ require.NoError(t, sh.Init(context.Background()))
- defer sh.Close()
+ defer sh.Close(context.Background())
checkAllObjs(false)
checkObj(object.AddressOf(tombObj), nil)
checkTombMembers(false)
- err = sh.refillMetabase()
+ err = sh.refillMetabase(context.Background())
require.NoError(t, err)
c, err = sh.metaBase.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, phyBefore, c.Phy())
- require.Equal(t, logicalBefore, c.Logic())
+ require.Equal(t, phyBefore, c.Phy)
+ require.Equal(t, logicalBefore, c.Logic)
checkAllObjs(true)
checkObj(object.AddressOf(tombObj), tombObj)
checkTombMembers(true)
checkLocked(t, cnrLocked, locked)
+ require.Equal(t, int64(len(mObjs)+2), mm.refillCount) // 1 lock + 1 tomb
+ require.Equal(t, "completed", mm.refillStatus)
+ require.Equal(t, uint32(100), mm.refillPercent)
}
diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go
new file mode 100644
index 0000000000..8dc1f05225
--- /dev/null
+++ b/pkg/local_object_storage/shard/count.go
@@ -0,0 +1,37 @@
+package shard
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// LogicalObjectsCount returns the number of logical objects.
+func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Shard.LogicalObjectsCount",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.GetMode().NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+
+ cc, err := s.metaBase.ObjectCounters()
+ if err != nil {
+ return 0, err
+ }
+ return cc.Logic, nil
+}
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index 6ae3bf7dd2..0101817a81 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -1,12 +1,17 @@
package shard
import (
- "errors"
+ "context"
+ "fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -16,7 +21,9 @@ type DeletePrm struct {
}
// DeleteRes groups the resulting values of Delete operation.
-type DeleteRes struct{}
+type DeleteRes struct {
+ deleted uint64
+}
// SetAddresses is a Delete option to set the addresses of the objects to delete.
//
@@ -25,86 +32,130 @@ func (p *DeletePrm) SetAddresses(addr ...oid.Address) {
p.addr = append(p.addr, addr...)
}
-// Delete removes data from the shard's writeCache, metaBase and
-// blobStor.
-func (s *Shard) Delete(prm DeletePrm) (DeleteRes, error) {
+// Delete removes data from the shard's metaBase and blobStor.
+func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Delete",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Int("addr_count", len(prm.addr)),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
- return s.delete(prm)
+ return s.delete(ctx, prm, false)
}
-func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
+func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (DeleteRes, error) {
if s.info.Mode.ReadOnly() {
return DeleteRes{}, ErrReadOnlyMode
} else if s.info.Mode.NoMetabase() {
return DeleteRes{}, ErrDegradedMode
}
- ln := len(prm.addr)
-
- smalls := make(map[oid.Address][]byte, ln)
-
- for i := range prm.addr {
- if s.hasWriteCache() {
- err := s.writeCache.Delete(prm.addr[i])
- if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
- s.log.Warn("can't delete object from write cache", zap.String("error", err.Error()))
- }
- }
-
- var sPrm meta.StorageIDPrm
- sPrm.SetAddress(prm.addr[i])
-
- res, err := s.metaBase.StorageID(sPrm)
- if err != nil {
- s.log.Debug("can't get storage ID from metabase",
- zap.Stringer("object", prm.addr[i]),
- zap.String("error", err.Error()))
-
- continue
- }
-
- if res.StorageID() != nil {
- smalls[prm.addr[i]] = res.StorageID()
- }
- }
-
- var delPrm meta.DeletePrm
- delPrm.SetAddresses(prm.addr...)
-
- res, err := s.metaBase.Delete(delPrm)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- return DeleteRes{}, err // stop on metabase error ?
+ return DeleteRes{}, err
}
+ defer release()
- var totalRemovedPayload uint64
-
- s.decObjectCounterBy(physical, res.RawObjectsRemoved())
- s.decObjectCounterBy(logical, res.AvailableObjectsRemoved())
- for i := range prm.addr {
- removedPayload := res.RemovedPhysicalObjectSizes()[i]
- totalRemovedPayload += removedPayload
- logicalRemovedPayload := res.RemovedLogicalObjectSizes()[i]
- if logicalRemovedPayload > 0 {
- s.addToContainerSize(prm.addr[i].Container().EncodeToString(), -int64(logicalRemovedPayload))
+ result := DeleteRes{}
+ for _, addr := range prm.addr {
+ select {
+ case <-ctx.Done():
+ return result, ctx.Err()
+ default:
}
- }
- s.addToPayloadSize(-int64(totalRemovedPayload))
- for i := range prm.addr {
- var delPrm common.DeletePrm
- delPrm.Address = prm.addr[i]
- id := smalls[prm.addr[i]]
- delPrm.StorageID = id
-
- _, err = s.blobStor.Delete(delPrm)
- if err != nil {
- s.log.Debug("can't remove object from blobStor",
- zap.Stringer("object_address", prm.addr[i]),
- zap.String("error", err.Error()))
+ if err := s.validateWritecacheDoesntContainObject(ctx, addr); err != nil {
+ if skipFailed {
+ continue
+ }
+ return result, err
}
+
+ if err := s.deleteFromBlobstor(ctx, addr); err != nil {
+ if skipFailed {
+ continue
+ }
+ return result, err
+ }
+
+ if err := s.deleteFromMetabase(ctx, addr); err != nil {
+ if skipFailed {
+ continue
+ }
+ return result, err
+ }
+ result.deleted++
}
- return DeleteRes{}, nil
+ return result, nil
+}
+
+func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr oid.Address) error {
+ if !s.hasWriteCache() {
+ return nil
+ }
+ _, err := s.writeCache.Head(ctx, addr)
+ if err == nil {
+ s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
+ return fmt.Errorf("object %s must be flushed from writecache", addr)
+ }
+ if client.IsErrObjectNotFound(err) {
+ return nil
+ }
+ return err
+}
+
+func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error {
+ var sPrm meta.StorageIDPrm
+ sPrm.SetAddress(addr)
+
+ res, err := s.metaBase.StorageID(ctx, sPrm)
+ if err != nil {
+ s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
+ zap.Stringer("object", addr),
+ zap.Error(err))
+ return err
+ }
+ storageID := res.StorageID()
+ if storageID == nil {
+ // A nil storageID means one of two things:
+ // 1. there is no such object;
+ // 2. the object is stored by the writecache, which should not happen, as `validateWritecacheDoesntContainObject` is called before `deleteFromBlobstor`
+ return nil
+ }
+
+ var delPrm common.DeletePrm
+ delPrm.Address = addr
+ delPrm.StorageID = storageID
+
+ _, err = s.blobStor.Delete(ctx, delPrm)
+ if err != nil && !client.IsErrObjectNotFound(err) {
+ s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
+ zap.Stringer("object_address", addr),
+ zap.Error(err))
+ return err
+ }
+ return nil
+}
+
+func (s *Shard) deleteFromMetabase(ctx context.Context, addr oid.Address) error {
+ var delPrm meta.DeletePrm
+ delPrm.SetAddresses(addr)
+
+ res, err := s.metaBase.Delete(ctx, delPrm)
+ if err != nil {
+ return err
+ }
+ s.decObjectCounterBy(physical, res.PhyCount())
+ s.decObjectCounterBy(logical, res.LogicCount())
+ s.decObjectCounterBy(user, res.UserCount())
+ s.decContainerObjectCounter(res.RemovedByCnrID())
+ s.addToContainerSize(addr.Container().EncodeToString(), -int64(res.LogicSize()))
+ s.addToPayloadSize(-int64(res.PhySize()))
+
+ return nil
}
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index b1574ab8bb..c9ce93bc59 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -1,80 +1,71 @@
-package shard_test
+package shard
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
)
-func TestShard_Delete(t *testing.T) {
- t.Run("without write cache", func(t *testing.T) {
- testShardDelete(t, false)
+func TestShard_Delete_SmallObject(t *testing.T) {
+ t.Run("small object without write cache", func(t *testing.T) {
+ t.Parallel()
+ testShard(t, false, 1<<5)
})
- t.Run("with write cache", func(t *testing.T) {
- testShardDelete(t, true)
+ t.Run("small object with write cache", func(t *testing.T) {
+ t.Parallel()
+ testShard(t, true, 1<<5)
})
}
-func testShardDelete(t *testing.T, hasWriteCache bool) {
+func TestShard_Delete_BigObject(t *testing.T) {
+ t.Run("big object without write cache", func(t *testing.T) {
+ t.Parallel()
+ testShard(t, false, 1<<20)
+ })
+
+ t.Run("big object with write cache", func(t *testing.T) {
+ t.Parallel()
+ testShard(t, true, 1<<20)
+ })
+}
+
+func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
sh := newShard(t, hasWriteCache)
- defer releaseShard(sh, t)
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
- obj := generateObjectWithCID(t, cnr)
- addAttribute(obj, "foo", "bar")
+ obj := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj, "foo", "bar")
+ testutil.AddPayload(obj, payloadSize)
- var putPrm shard.PutPrm
- var getPrm shard.GetPrm
+ var putPrm PutPrm
+ putPrm.SetObject(obj)
- t.Run("big object", func(t *testing.T) {
- addPayload(obj, 1<<20)
+ var getPrm GetPrm
+ getPrm.SetAddress(object.AddressOf(obj))
- putPrm.SetObject(obj)
- getPrm.SetAddress(object.AddressOf(obj))
+ var delPrm DeletePrm
+ delPrm.SetAddresses(object.AddressOf(obj))
- var delPrm shard.DeletePrm
- delPrm.SetAddresses(object.AddressOf(obj))
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
- _, err := sh.Put(putPrm)
- require.NoError(t, err)
+ _, err = sh.Get(context.Background(), getPrm)
+ require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
- require.NoError(t, err)
+ if hasWriteCache {
+ require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}))
+ }
+ _, err = sh.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
- _, err = sh.Delete(delPrm)
- require.NoError(t, err)
-
- _, err = sh.Get(getPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
- })
-
- t.Run("small object", func(t *testing.T) {
- obj := generateObjectWithCID(t, cnr)
- addAttribute(obj, "foo", "bar")
- addPayload(obj, 1<<5)
-
- putPrm.SetObject(obj)
- getPrm.SetAddress(object.AddressOf(obj))
-
- var delPrm shard.DeletePrm
- delPrm.SetAddresses(object.AddressOf(obj))
-
- _, err := sh.Put(putPrm)
- require.NoError(t, err)
-
- _, err = sh.Get(getPrm)
- require.NoError(t, err)
-
- _, err = sh.Delete(delPrm)
- require.NoError(t, err)
-
- _, err = sh.Get(getPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
- })
+ _, err = sh.Get(context.Background(), getPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
}
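
The assertions above switch from `errors.As` against `apistatus` targets to the SDK helper `client.IsErrObjectNotFound`, which is also what the delete path itself now uses. The same check in caller code looks roughly like this; `verifyDeleted` is a hypothetical helper, not part of this change:

```go
package example

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)

// verifyDeleted treats "object not found" after a Delete as success
// and everything else as a failure.
func verifyDeleted(ctx context.Context, sh *shard.Shard, getPrm shard.GetPrm) error {
	_, err := sh.Get(ctx, getPrm)
	switch {
	case err == nil:
		return errors.New("object still present after delete")
	case client.IsErrObjectNotFound(err):
		return nil // the object is gone, as expected
	default:
		return err // infrastructure failure, propagate it
	}
}
```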
diff --git a/pkg/local_object_storage/shard/dump.go b/pkg/local_object_storage/shard/dump.go
deleted file mode 100644
index 8d9fe0f711..0000000000
--- a/pkg/local_object_storage/shard/dump.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package shard
-
-import (
- "encoding/binary"
- "io"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
-)
-
-var dumpMagic = []byte("NEOF")
-
-// DumpPrm groups the parameters of Dump operation.
-type DumpPrm struct {
- path string
- stream io.Writer
- ignoreErrors bool
-}
-
-// WithPath is an Dump option to set the destination path.
-func (p *DumpPrm) WithPath(path string) {
- p.path = path
-}
-
-// WithStream is an Dump option to set the destination stream.
-// It takes priority over `path` option.
-func (p *DumpPrm) WithStream(r io.Writer) {
- p.stream = r
-}
-
-// WithIgnoreErrors is an Dump option to allow ignore all errors during iteration.
-// This includes invalid blobovniczas as well as corrupted objects.
-func (p *DumpPrm) WithIgnoreErrors(ignore bool) {
- p.ignoreErrors = ignore
-}
-
-// DumpRes groups the result fields of Dump operation.
-type DumpRes struct {
- count int
-}
-
-// Count return amount of object written.
-func (r DumpRes) Count() int {
- return r.count
-}
-
-var ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode")
-
-// Dump dumps all objects from the shard to a file or stream.
-//
-// Returns any error encountered.
-func (s *Shard) Dump(prm DumpPrm) (DumpRes, error) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if !s.info.Mode.ReadOnly() {
- return DumpRes{}, ErrMustBeReadOnly
- }
-
- w := prm.stream
- if w == nil {
- f, err := os.OpenFile(prm.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0640)
- if err != nil {
- return DumpRes{}, err
- }
- defer f.Close()
-
- w = f
- }
-
- _, err := w.Write(dumpMagic)
- if err != nil {
- return DumpRes{}, err
- }
-
- var count int
-
- if s.hasWriteCache() {
- var iterPrm writecache.IterationPrm
-
- iterPrm.WithIgnoreErrors(prm.ignoreErrors)
- iterPrm.WithHandler(func(data []byte) error {
- var size [4]byte
- binary.LittleEndian.PutUint32(size[:], uint32(len(data)))
- if _, err := w.Write(size[:]); err != nil {
- return err
- }
-
- if _, err := w.Write(data); err != nil {
- return err
- }
-
- count++
- return nil
- })
-
- err := s.writeCache.Iterate(iterPrm)
- if err != nil {
- return DumpRes{}, err
- }
- }
-
- var pi common.IteratePrm
- pi.IgnoreErrors = prm.ignoreErrors
- pi.Handler = func(elem common.IterationElement) error {
- data := elem.ObjectData
-
- var size [4]byte
- binary.LittleEndian.PutUint32(size[:], uint32(len(data)))
- if _, err := w.Write(size[:]); err != nil {
- return err
- }
-
- if _, err := w.Write(data); err != nil {
- return err
- }
-
- count++
- return nil
- }
-
- if _, err := s.blobStor.Iterate(pi); err != nil {
- return DumpRes{}, err
- }
-
- return DumpRes{count: count}, nil
-}
diff --git a/pkg/local_object_storage/shard/dump_test.go b/pkg/local_object_storage/shard/dump_test.go
deleted file mode 100644
index 96802d4abf..0000000000
--- a/pkg/local_object_storage/shard/dump_test.go
+++ /dev/null
@@ -1,410 +0,0 @@
-package shard_test
-
-import (
- "bytes"
- "io"
- "math/rand"
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/klauspost/compress/zstd"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
-)
-
-func TestDump(t *testing.T) {
- t.Run("without write-cache", func(t *testing.T) {
- testDump(t, 10, false)
- })
- t.Run("with write-cache", func(t *testing.T) {
- // Put a bit more objects to write-cache to facilitate race-conditions.
- testDump(t, 100, true)
- })
-}
-
-func testDump(t *testing.T, objCount int, hasWriteCache bool) {
- const (
- wcSmallObjectSize = 1024 // 1 KiB, goes to write-cache memory
- wcBigObjectSize = 4 * 1024 // 4 KiB, goes to write-cache FSTree
- bsSmallObjectSize = 10 * 1024 // 10 KiB, goes to blobovnicza DB
- bsBigObjectSize = 1024*1024 + 1 // > 1 MiB, goes to blobovnicza FSTree
- )
-
- var sh *shard.Shard
- if !hasWriteCache {
- sh = newShard(t, false)
- } else {
- sh = newCustomShard(t, t.TempDir(), true,
- []writecache.Option{
- writecache.WithSmallObjectSize(wcSmallObjectSize),
- writecache.WithMaxObjectSize(wcBigObjectSize),
- writecache.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- },
- nil)
- }
- defer releaseShard(sh, t)
-
- out := filepath.Join(t.TempDir(), "dump")
- var prm shard.DumpPrm
- prm.WithPath(out)
-
- t.Run("must be read-only", func(t *testing.T) {
- _, err := sh.Dump(prm)
- require.ErrorIs(t, err, shard.ErrMustBeReadOnly)
- })
-
- require.NoError(t, sh.SetMode(mode.ReadOnly))
- outEmpty := out + ".empty"
- var dumpPrm shard.DumpPrm
- dumpPrm.WithPath(outEmpty)
-
- res, err := sh.Dump(dumpPrm)
- require.NoError(t, err)
- require.Equal(t, 0, res.Count())
- require.NoError(t, sh.SetMode(mode.ReadWrite))
-
- // Approximate object header size.
- const headerSize = 400
-
- objects := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
- cnr := cidtest.ID()
- var size int
- switch i % 6 {
- case 0, 1:
- size = wcSmallObjectSize - headerSize
- case 2, 3:
- size = bsSmallObjectSize - headerSize
- case 4:
- size = wcBigObjectSize - headerSize
- default:
- size = bsBigObjectSize - headerSize
- }
- data := make([]byte, size)
- rand.Read(data)
- obj := generateObjectWithPayload(cnr, data)
- objects[i] = obj
-
- var prm shard.PutPrm
- prm.SetObject(objects[i])
- _, err := sh.Put(prm)
- require.NoError(t, err)
- }
-
- require.NoError(t, sh.SetMode(mode.ReadOnly))
-
- t.Run("invalid path", func(t *testing.T) {
- var dumpPrm shard.DumpPrm
- dumpPrm.WithPath("\x00")
-
- _, err := sh.Dump(dumpPrm)
- require.Error(t, err)
- })
-
- res, err = sh.Dump(prm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
-
- t.Run("restore", func(t *testing.T) {
- sh := newShard(t, false)
- defer releaseShard(sh, t)
-
- t.Run("empty dump", func(t *testing.T) {
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(outEmpty)
- res, err := sh.Restore(restorePrm)
- require.NoError(t, err)
- require.Equal(t, 0, res.Count())
- })
-
- t.Run("invalid path", func(t *testing.T) {
- _, err := sh.Restore(*new(shard.RestorePrm))
- require.ErrorIs(t, err, os.ErrNotExist)
- })
-
- t.Run("invalid file", func(t *testing.T) {
- t.Run("invalid magic", func(t *testing.T) {
- out := out + ".wrongmagic"
- require.NoError(t, os.WriteFile(out, []byte{0, 0, 0, 0}, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(restorePrm)
- require.ErrorIs(t, err, shard.ErrInvalidMagic)
- })
-
- fileData, err := os.ReadFile(out)
- require.NoError(t, err)
-
- t.Run("incomplete size", func(t *testing.T) {
- out := out + ".wrongsize"
- fileData := append(fileData, 1)
- require.NoError(t, os.WriteFile(out, fileData, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(restorePrm)
- require.ErrorIs(t, err, io.ErrUnexpectedEOF)
- })
- t.Run("incomplete object data", func(t *testing.T) {
- out := out + ".wrongsize"
- fileData := append(fileData, 1, 0, 0, 0)
- require.NoError(t, os.WriteFile(out, fileData, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(restorePrm)
- require.ErrorIs(t, err, io.EOF)
- })
- t.Run("invalid object", func(t *testing.T) {
- out := out + ".wrongobj"
- fileData := append(fileData, 1, 0, 0, 0, 0xFF, 4, 0, 0, 0, 1, 2, 3, 4)
- require.NoError(t, os.WriteFile(out, fileData, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(restorePrm)
- require.Error(t, err)
-
- t.Run("skip errors", func(t *testing.T) {
- sh := newCustomShard(t, filepath.Join(t.TempDir(), "ignore"), false, nil, nil)
- t.Cleanup(func() { require.NoError(t, sh.Close()) })
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
- restorePrm.WithIgnoreErrors(true)
-
- res, err := sh.Restore(restorePrm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
- require.Equal(t, 2, res.FailCount())
- })
- })
- })
-
- var prm shard.RestorePrm
- prm.WithPath(out)
- t.Run("must allow write", func(t *testing.T) {
- require.NoError(t, sh.SetMode(mode.ReadOnly))
-
- _, err := sh.Restore(prm)
- require.ErrorIs(t, err, shard.ErrReadOnlyMode)
- })
-
- require.NoError(t, sh.SetMode(mode.ReadWrite))
-
- checkRestore(t, sh, prm, objects)
- })
-}
-
-func TestStream(t *testing.T) {
- sh1 := newCustomShard(t, filepath.Join(t.TempDir(), "shard1"), false, nil, nil)
- defer releaseShard(sh1, t)
-
- sh2 := newCustomShard(t, filepath.Join(t.TempDir(), "shard2"), false, nil, nil)
- defer releaseShard(sh2, t)
-
- const objCount = 5
- objects := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
- cnr := cidtest.ID()
- obj := generateObjectWithCID(t, cnr)
- objects[i] = obj
-
- var prm shard.PutPrm
- prm.SetObject(objects[i])
- _, err := sh1.Put(prm)
- require.NoError(t, err)
- }
-
- require.NoError(t, sh1.SetMode(mode.ReadOnly))
-
- r, w := io.Pipe()
- finish := make(chan struct{})
-
- go func() {
- var dumpPrm shard.DumpPrm
- dumpPrm.WithStream(w)
-
- res, err := sh1.Dump(dumpPrm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
- require.NoError(t, w.Close())
- close(finish)
- }()
-
- var restorePrm shard.RestorePrm
- restorePrm.WithStream(r)
-
- checkRestore(t, sh2, restorePrm, objects)
- require.Eventually(t, func() bool {
- select {
- case <-finish:
- return true
- default:
- return false
- }
- }, time.Second, time.Millisecond)
-}
-
-func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects []*objectSDK.Object) {
- res, err := sh.Restore(prm)
- require.NoError(t, err)
- require.Equal(t, len(objects), res.Count())
-
- var getPrm shard.GetPrm
-
- for i := range objects {
- getPrm.SetAddress(object.AddressOf(objects[i]))
- res, err := sh.Get(getPrm)
- require.NoError(t, err)
- require.Equal(t, objects[i], res.Object())
- }
-}
-
-func TestDumpIgnoreErrors(t *testing.T) {
- const (
- wcSmallObjectSize = 512 // goes to write-cache memory
- wcBigObjectSize = wcSmallObjectSize << 1 // goes to write-cache FSTree
- bsSmallObjectSize = wcSmallObjectSize << 2 // goes to blobovnicza DB
-
- objCount = 10
- headerSize = 400
- )
-
- dir := t.TempDir()
- bsPath := filepath.Join(dir, "blob")
- bsOpts := func(sw uint64) []blobstor.Option {
- return []blobstor.Option{
- blobstor.WithCompressObjects(true),
- blobstor.WithStorages([]blobstor.SubStorage{
- {
- Storage: blobovniczatree.NewBlobovniczaTree(
- blobovniczatree.WithRootPath(filepath.Join(bsPath, "blobovnicza")),
- blobovniczatree.WithBlobovniczaShallowDepth(1),
- blobovniczatree.WithBlobovniczaShallowWidth(sw),
- blobovniczatree.WithOpenedCacheSize(1)),
- Policy: func(_ *objectSDK.Object, data []byte) bool {
- return len(data) < bsSmallObjectSize
- },
- },
- {
- Storage: fstree.New(
- fstree.WithPath(bsPath),
- fstree.WithDepth(1)),
- },
- }),
- }
- }
- wcPath := filepath.Join(dir, "writecache")
- wcOpts := []writecache.Option{
- writecache.WithPath(wcPath),
- writecache.WithSmallObjectSize(wcSmallObjectSize),
- writecache.WithMaxObjectSize(wcBigObjectSize),
- }
- sh := newCustomShard(t, dir, true, wcOpts, bsOpts(2))
-
- objects := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
- size := (wcSmallObjectSize << (i % 4)) - headerSize
- obj := generateObjectWithPayload(cidtest.ID(), make([]byte, size))
- objects[i] = obj
-
- var prm shard.PutPrm
- prm.SetObject(objects[i])
- _, err := sh.Put(prm)
- require.NoError(t, err)
- }
-
- releaseShard(sh, t)
-
- b := bytes.NewBuffer(nil)
- badObject := make([]byte, 1000)
- enc, err := zstd.NewWriter(b)
- require.NoError(t, err)
- corruptedData := enc.EncodeAll(badObject, nil)
- for i := 4; i < len(corruptedData); i++ {
- corruptedData[i] ^= 0xFF
- }
-
- // There are 3 different types of errors to consider.
- // To setup envirionment we use implementation details so this test must be updated
- // if any of them are changed.
- {
- // 1. Invalid object in fs tree.
- // 1.1. Invalid compressed data.
- addr := cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString()
- dirName := filepath.Join(bsPath, addr[:2])
- require.NoError(t, os.MkdirAll(dirName, os.ModePerm))
- require.NoError(t, os.WriteFile(filepath.Join(dirName, addr[2:]), corruptedData, os.ModePerm))
-
- // 1.2. Unreadable file.
- addr = cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString()
- dirName = filepath.Join(bsPath, addr[:2])
- require.NoError(t, os.MkdirAll(dirName, os.ModePerm))
-
- fname := filepath.Join(dirName, addr[2:])
- require.NoError(t, os.WriteFile(fname, []byte{}, 0))
-
- // 1.3. Unreadable dir.
- require.NoError(t, os.MkdirAll(filepath.Join(bsPath, "ZZ"), 0))
- }
-
- sh = newCustomShard(t, dir, true, wcOpts, bsOpts(3))
- require.NoError(t, sh.SetMode(mode.ReadOnly))
-
- {
- // 2. Invalid object in blobovnicza.
- // 2.1. Invalid blobovnicza.
- bTree := filepath.Join(bsPath, "blobovnicza")
- data := make([]byte, 1024)
- rand.Read(data)
- require.NoError(t, os.WriteFile(filepath.Join(bTree, "0", "2"), data, 0))
-
- // 2.2. Invalid object in valid blobovnicza.
- var prm blobovnicza.PutPrm
- prm.SetAddress(oid.Address{})
- prm.SetMarshaledObject(corruptedData)
- b := blobovnicza.New(blobovnicza.WithPath(filepath.Join(bTree, "1", "2")))
- require.NoError(t, b.Open())
- _, err := b.Put(prm)
- require.NoError(t, err)
- require.NoError(t, b.Close())
- }
-
- {
- // 3. Invalid object in write-cache. Note that because shard is read-only
- // the object won't be flushed.
- addr := cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString()
- dir := filepath.Join(wcPath, addr[:1])
- require.NoError(t, os.MkdirAll(dir, os.ModePerm))
- require.NoError(t, os.WriteFile(filepath.Join(dir, addr[1:]), nil, 0))
- }
-
- out := filepath.Join(t.TempDir(), "out.dump")
- var dumpPrm shard.DumpPrm
- dumpPrm.WithPath(out)
- dumpPrm.WithIgnoreErrors(true)
- res, err := sh.Dump(dumpPrm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
-}
diff --git a/pkg/local_object_storage/shard/errors.go b/pkg/local_object_storage/shard/errors.go
index 3e5224eb9d..045ad1bba9 100644
--- a/pkg/local_object_storage/shard/errors.go
+++ b/pkg/local_object_storage/shard/errors.go
@@ -4,25 +4,17 @@ import (
"errors"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
-// IsErrNotFound checks if error returned by Shard Get/Head/GetRange method
-// corresponds to missing object.
-func IsErrNotFound(err error) bool {
- return errors.As(err, new(apistatus.ObjectNotFound))
-}
-
-// IsErrRemoved checks if error returned by Shard Exists/Get/Head/GetRange method
-// corresponds to removed object.
-func IsErrRemoved(err error) bool {
- return errors.As(err, new(apistatus.ObjectAlreadyRemoved))
-}
+var ErrShardDisabled = logicerr.New("shard disabled")
// IsErrOutOfRange checks if an error returned by Shard GetRange method
// corresponds to exceeding the object bounds.
func IsErrOutOfRange(err error) bool {
- return errors.As(err, new(apistatus.ObjectOutOfRange))
+ var target *apistatus.ObjectOutOfRange
+ return errors.As(err, &target)
}
// IsErrObjectExpired checks if an error returned by Shard corresponds to
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 35b9cba9b9..2c11b6b016 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -1,24 +1,30 @@
package shard
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- addr oid.Address
+	// Address of the object to check for existence.
+	Address oid.Address
+	// Address of the EC parent object to check for existence.
+	ECParentAddress oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
type ExistsRes struct {
ex bool
-}
-
-// SetAddress is an Exists option to set object checked for existence.
-func (p *ExistsPrm) SetAddress(addr oid.Address) {
- p.addr = addr
+ lc bool
}
// Exists returns the fact that the object is in the shard.
@@ -26,6 +32,11 @@ func (p ExistsRes) Exists() bool {
return p.ex
}
+// Locked returns the fact that the object is locked.
+func (p ExistsRes) Locked() bool {
+ return p.lc
+}
+
// Exists checks if object is presented in shard.
//
// Returns any error encountered that does not allow to
@@ -33,30 +44,53 @@ func (p ExistsRes) Exists() bool {
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) Exists(prm ExistsPrm) (ExistsRes, error) {
- var exists bool
- var err error
+// Returns the ErrShardDisabled if the shard is disabled.
+func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Exists",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.Address.EncodeToString()),
+ ))
+ defer span.End()
s.m.RLock()
defer s.m.RUnlock()
+ if s.info.Mode.Disabled() {
+ return ExistsRes{}, ErrShardDisabled
+ } else if s.info.EvacuationInProgress {
+ return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ExistsRes{}, err
+ }
+ defer release()
+
+ var exists bool
+ var locked bool
+
if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
- p.Address = prm.addr
+ p.Address = prm.Address
var res common.ExistsRes
- res, err = s.blobStor.Exists(p)
+ res, err = s.blobStor.Exists(ctx, p)
exists = res.Exists
} else {
var existsPrm meta.ExistsPrm
- existsPrm.SetAddress(prm.addr)
+ existsPrm.SetAddress(prm.Address)
+ existsPrm.SetECParent(prm.ECParentAddress)
var res meta.ExistsRes
- res, err = s.metaBase.Exists(existsPrm)
+ res, err = s.metaBase.Exists(ctx, existsPrm)
exists = res.Exists()
+ locked = res.Locked()
}
return ExistsRes{
ex: exists,
+ lc: locked,
}, err
}
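
`Exists` now takes a context, honors the shard mode and the ops limiter, and exposes its parameters as exported fields. A caller-side sketch, assuming the API above; `checkExists` and its arguments are placeholders:

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// checkExists is a hypothetical wrapper over the new Exists signature.
func checkExists(ctx context.Context, sh *shard.Shard, addr oid.Address) error {
	prm := shard.ExistsPrm{Address: addr}

	res, err := sh.Exists(ctx, prm)
	if errors.Is(err, shard.ErrShardDisabled) {
		return err // a disabled shard refuses even existence checks
	}
	if err != nil {
		return err // includes "not found" while evacuation is in progress
	}
	fmt.Println("exists:", res.Exists(), "locked:", res.Locked())
	return nil
}
```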
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index a8910561e1..a262a52cbd 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -5,13 +5,23 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ minExpiredWorkers = 2
+ minExpiredBatchSize = 1
)
// TombstoneSource is an interface that checks
@@ -23,43 +33,44 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
-// Event represents class of external events.
-type Event interface {
- typ() eventType
-}
+type newEpochHandler func(context.Context, uint64)
-type eventType int
-
-const (
- _ eventType = iota
- eventNewEpoch
-)
-
-type newEpoch struct {
- epoch uint64
-}
-
-func (e newEpoch) typ() eventType {
- return eventNewEpoch
-}
-
-// EventNewEpoch returns new epoch event.
-func EventNewEpoch(e uint64) Event {
- return newEpoch{
- epoch: e,
- }
-}
-
-type eventHandler func(context.Context, Event)
-
-type eventHandlers struct {
+type newEpochHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
- handlers []eventHandler
+ handlers []newEpochHandler
}
+type gcRunResult struct {
+ success bool
+ deleted uint64
+ failedToDelete uint64
+}
+
+const (
+ objectTypeLock = "lock"
+ objectTypeTombstone = "tombstone"
+ objectTypeRegular = "regular"
+)
+
+type GCMetrics interface {
+ SetShardID(string)
+ AddRunDuration(d time.Duration, success bool)
+ AddDeletedCount(deleted, failed uint64)
+ AddExpiredObjectCollectionDuration(d time.Duration, success bool, objectType string)
+ AddInhumedObjectCount(count uint64, objectType string)
+}
+
+type noopGCMetrics struct{}
+
+func (m *noopGCMetrics) SetShardID(string) {}
+func (m *noopGCMetrics) AddRunDuration(time.Duration, bool) {}
+func (m *noopGCMetrics) AddDeletedCount(uint64, uint64) {}
+func (m *noopGCMetrics) AddExpiredObjectCollectionDuration(time.Duration, bool, string) {}
+func (m *noopGCMetrics) AddInhumedObjectCount(uint64, string) {}
+
type gc struct {
*gcCfg
@@ -69,10 +80,12 @@ type gc struct {
workerPool util.WorkerPool
- remover func()
+ remover func(context.Context) gcRunResult
- eventChan chan Event
- mEventHandler map[eventType]*eventHandlers
+	// newEpochChan is used only for listening for new epoch events.
+	// It is safe to keep it open: writers check for context cancellation when writing to it.
+ newEpochChan chan uint64
+ newEpochHandlers *newEpochHandlers
}
type gcCfg struct {
@@ -81,76 +94,100 @@ type gcCfg struct {
log *logger.Logger
workerPoolInit func(int) util.WorkerPool
+
+ expiredCollectorWorkerCount int
+ expiredCollectorBatchSize int
+
+	metrics GCMetrics
+
+ testHookRemover func(ctx context.Context) gcRunResult
}
func defaultGCCfg() gcCfg {
return gcCfg{
removerInterval: 10 * time.Second,
- log: &logger.Logger{Logger: zap.L()},
+ log: logger.NewLoggerWrapper(zap.L()),
workerPoolInit: func(int) util.WorkerPool {
return nil
},
+ metrics: &noopGCMetrics{},
}
}
-func (gc *gc) init() {
- sz := 0
-
- for _, v := range gc.mEventHandler {
- sz += len(v.handlers)
- }
-
- if sz > 0 {
- gc.workerPool = gc.workerPoolInit(sz)
- }
-
+func (gc *gc) init(ctx context.Context) {
+ gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
gc.wg.Add(2)
- go gc.tickRemover()
- go gc.listenEvents()
+ go gc.tickRemover(ctx)
+ go gc.listenEvents(ctx)
}
-func (gc *gc) listenEvents() {
+func (gc *gc) listenEvents(ctx context.Context) {
defer gc.wg.Done()
for {
- event, ok := <-gc.eventChan
- if !ok {
- gc.log.Warn("stop event listener by closed channel")
+ select {
+ case <-gc.stopChannel:
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
return
- }
-
- v, ok := gc.mEventHandler[event.typ()]
- if !ok {
- continue
- }
-
- v.cancelFunc()
- v.prevGroup.Wait()
-
- var ctx context.Context
- ctx, v.cancelFunc = context.WithCancel(context.Background())
-
- v.prevGroup.Add(len(v.handlers))
-
- for i := range v.handlers {
- h := v.handlers[i]
-
- err := gc.workerPool.Submit(func() {
- h(ctx, event)
- v.prevGroup.Done()
- })
- if err != nil {
- gc.log.Warn("could not submit GC job to worker pool",
- zap.String("error", err.Error()),
- )
-
- v.prevGroup.Done()
+ case <-ctx.Done():
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
+ return
+ case event, ok := <-gc.newEpochChan:
+ if !ok {
+ gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
+ return
}
+
+ gc.handleEvent(ctx, event)
}
}
}
-func (gc *gc) tickRemover() {
+func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
+ gc.newEpochHandlers.cancelFunc()
+ gc.newEpochHandlers.prevGroup.Wait()
+
+ var runCtx context.Context
+ runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
+
+ gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
+
+ for i := range gc.newEpochHandlers.handlers {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ h := gc.newEpochHandlers.handlers[i]
+
+ err := gc.workerPool.Submit(func() {
+ defer gc.newEpochHandlers.prevGroup.Done()
+ h(runCtx, epoch)
+ })
+ if err != nil {
+ gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
+ zap.Error(err),
+ )
+
+ gc.newEpochHandlers.prevGroup.Done()
+ }
+ }
+}
+
+func (gc *gc) releaseResources(ctx context.Context) {
+ if gc.workerPool != nil {
+ gc.workerPool.Release()
+ }
+
+	// Avoid closing gc.newEpochChan here:
+	// it could be closed before writers have stopped using it.
+	// It is safe to keep it open.
+
+ gc.log.Debug(ctx, logs.ShardGCIsStopped)
+}
+
+func (gc *gc) tickRemover(ctx context.Context) {
defer gc.wg.Done()
timer := time.NewTimer(gc.removerInterval)
@@ -158,35 +195,55 @@ func (gc *gc) tickRemover() {
for {
select {
+ case <-ctx.Done():
+			// The context was canceled before shard shutdown began,
+			// so it makes sense to stop collecting garbage on cancellation as well.
+ gc.releaseResources(ctx)
+ return
case <-gc.stopChannel:
- if gc.workerPool != nil {
- gc.workerPool.Release()
- }
-
- close(gc.eventChan)
-
- gc.log.Debug("GC is stopped")
+ gc.releaseResources(ctx)
return
case <-timer.C:
- gc.remover()
+ startedAt := time.Now()
+
+ var result gcRunResult
+ if gc.testHookRemover != nil {
+ result = gc.testHookRemover(ctx)
+ } else {
+ result = gc.remover(ctx)
+ }
timer.Reset(gc.removerInterval)
+
+ gc.metrics.AddRunDuration(time.Since(startedAt), result.success)
+ gc.metrics.AddDeletedCount(result.deleted, result.failedToDelete)
}
}
}
-func (gc *gc) stop() {
+func (gc *gc) stop(ctx context.Context) {
gc.onceStop.Do(func() {
- gc.stopChannel <- struct{}{}
+ close(gc.stopChannel)
})
- gc.log.Info("waiting for GC workers to stop...")
+ gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
+
+ gc.newEpochHandlers.cancelFunc()
+ gc.newEpochHandlers.prevGroup.Wait()
}
// iterates over metabase and deletes objects
// with GC-marked graves.
// Does nothing if shard is in "read-only" mode.
-func (s *Shard) removeGarbage() {
+func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
+ ctx, cancel := context.WithCancel(pctx)
+ defer cancel()
+
+ s.gcCancel.Store(cancel)
+ if s.setModeRequested.Load() {
+ return
+ }
+
s.m.RLock()
defer s.m.RUnlock()
@@ -194,10 +251,58 @@ func (s *Shard) removeGarbage() {
return
}
+ s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
+ defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
+
+ buf, err := s.getGarbage(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
+ zap.Error(err),
+ )
+
+ return
+ } else if len(buf) == 0 {
+ result.success = true
+ return
+ }
+
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(buf...)
+
+ // delete accumulated objects
+ res, err := s.delete(ctx, deletePrm, true)
+
+ result.deleted = res.deleted
+ result.failedToDelete = uint64(len(buf)) - res.deleted
+ result.success = true
+
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
+ zap.Error(err),
+ )
+ result.success = false
+ }
+
+ return
+}
+
+func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+
buf := make([]oid.Address, 0, s.rmBatchSize)
var iterPrm meta.GarbageIterationPrm
iterPrm.SetHandler(func(g meta.GarbageObject) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
buf = append(buf, g.Address())
if len(buf) == s.rmBatchSize {
@@ -207,42 +312,76 @@ func (s *Shard) removeGarbage() {
return nil
})
- // iterate over metabase's objects with GC mark
- // (no more than s.rmBatchSize objects)
- err := s.metaBase.IterateOverGarbage(iterPrm)
- if err != nil {
- s.log.Warn("iterator over metabase graveyard failed",
- zap.String("error", err.Error()),
- )
-
- return
- } else if len(buf) == 0 {
- return
+ if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil {
+ return nil, err
}
- var deletePrm DeletePrm
- deletePrm.SetAddresses(buf...)
+ return buf, nil
+}
- // delete accumulated objects
- _, err = s.delete(deletePrm)
- if err != nil {
- s.log.Warn("could not delete the objects",
- zap.String("error", err.Error()),
- )
+func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
+ workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
+ batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
+ return
+}
- return
+func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
+ var err error
+ startedAt := time.Now()
+
+ defer func() {
+ s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
+ }()
+
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
+
+ workersCount, batchSize := s.getExpiredObjectsParameters()
+
+ errGroup, egCtx := errgroup.WithContext(ctx)
+ errGroup.SetLimit(workersCount)
+
+ errGroup.Go(func() error {
+ batch := make([]oid.Address, 0, batchSize)
+ expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
+ batch = append(batch, o.Address())
+
+ if len(batch) == batchSize {
+ expired := batch
+ errGroup.Go(func() error {
+ s.handleExpiredObjects(egCtx, expired)
+ return egCtx.Err()
+ })
+ batch = make([]oid.Address, 0, batchSize)
+ }
+ }
+ })
+ if expErr != nil {
+ return expErr
+ }
+
+ if len(batch) > 0 {
+ expired := batch
+ errGroup.Go(func() error {
+ s.handleExpiredObjects(egCtx, expired)
+ return egCtx.Err()
+ })
+ }
+
+ return nil
+ })
+
+ if err = errGroup.Wait(); err != nil {
+ s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
}
}
-func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
- expired, err := s.getExpiredObjects(ctx, e.(newEpoch).epoch, func(typ object.Type) bool {
- return typ != object.TypeTombstone && typ != object.TypeLock
- })
- if err != nil || len(expired) == 0 {
- if err != nil {
- s.log.Warn("iterator over expired objects failed", zap.String("error", err.Error()))
- }
+func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) {
+ select {
+ case <-ctx.Done():
return
+ default:
}
s.m.RLock()
@@ -252,22 +391,32 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
return
}
- var inhumePrm meta.InhumePrm
+ s.handleExpiredObjectsUnsafe(ctx, expired)
+}
- inhumePrm.SetAddresses(expired...)
- inhumePrm.SetGCMark()
+func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
- // inhume the collected objects
- res, err := s.metaBase.Inhume(inhumePrm)
+ expired, err := s.getExpiredWithLinked(ctx, expired)
if err != nil {
- s.log.Warn("could not inhume the objects",
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
return
}
- s.decObjectCounterBy(logical, res.AvailableInhumed())
+ res, err := s.inhumeGC(ctx, expired)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err))
+ return
+ }
+
+ s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeRegular)
+ s.decObjectCounterBy(logical, res.LogicInhumed())
+ s.decObjectCounterBy(user, res.UserInhumed())
+ s.decContainerObjectCounter(res.InhumedByCnrID())
i := 0
for i < res.GetDeletionInfoLength() {
@@ -277,11 +426,51 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
}
}
-func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
- epoch := e.(newEpoch).epoch
+func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+
+ result := make([]oid.Address, 0, len(source))
+ parentToChildren, err := s.metaBase.GetChildren(ctx, source)
+ if err != nil {
+ return nil, err
+ }
+ for parent, children := range parentToChildren {
+ result = append(result, parent)
+ result = append(result, children...)
+ }
+
+ return result, nil
+}
+
+func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) {
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return meta.InhumeRes{}, err
+ }
+ defer release()
+
+ var inhumePrm meta.InhumePrm
+ inhumePrm.SetAddresses(addrs...)
+ inhumePrm.SetGCMark()
+ return s.metaBase.Inhume(ctx, inhumePrm)
+}
+
+func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
+ var err error
+ startedAt := time.Now()
+
+ defer func() {
+ s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
+ }()
+
log := s.log.With(zap.Uint64("epoch", epoch))
- log.Debug("started expired tombstones handling")
+ log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
+ defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -299,22 +488,29 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
})
for {
- log.Debug("iterating tombstones")
+ log.Debug(ctx, logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
- s.log.Debug("shard is in a degraded mode, skip collecting expired tombstones")
+ s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
}
- err := s.metaBase.IterateOverGraveyard(iterPrm)
+ var release qos.ReleaseFunc
+ release, err = s.opsLimiter.ReadRequest(ctx)
if err != nil {
- log.Error("iterator over graveyard failed", zap.Error(err))
+ log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ s.m.RUnlock()
+ return
+ }
+ err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
+ release()
+ if err != nil {
+ log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
-
return
}
@@ -331,89 +527,137 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
}
}
- log.Debug("handling expired tombstones batch", zap.Int("number", len(tssExp)))
- s.expiredTombstonesCallback(ctx, tssExp)
+ log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+ if len(tssExp) > 0 {
+ s.expiredTombstonesCallback(ctx, tssExp)
+ }
iterPrm.SetOffset(tss[tssLen-1].Address())
tss = tss[:0]
tssExp = tssExp[:0]
}
-
- log.Debug("finished expired tombstones handling")
}
-func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
- expired, err := s.getExpiredObjects(ctx, e.(newEpoch).epoch, func(typ object.Type) bool {
- return typ == object.TypeLock
- })
- if err != nil || len(expired) == 0 {
- if err != nil {
- s.log.Warn("iterator over expired locks failed", zap.String("error", err.Error()))
+func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
+ var err error
+ startedAt := time.Now()
+
+ defer func() {
+ s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
+ }()
+
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
+
+ workersCount, batchSize := s.getExpiredObjectsParameters()
+
+ errGroup, egCtx := errgroup.WithContext(ctx)
+ errGroup.SetLimit(workersCount)
+
+ errGroup.Go(func() error {
+ batch := make([]oid.Address, 0, batchSize)
+
+ expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ if o.Type() == objectSDK.TypeLock {
+ batch = append(batch, o.Address())
+
+ if len(batch) == batchSize {
+ expired := batch
+ errGroup.Go(func() error {
+ s.expiredLocksCallback(egCtx, epoch, expired)
+ return egCtx.Err()
+ })
+ batch = make([]oid.Address, 0, batchSize)
+ }
+ }
+ })
+ if expErr != nil {
+ return expErr
}
- return
- }
- s.expiredLocksCallback(ctx, expired)
+ if len(batch) > 0 {
+ expired := batch
+ errGroup.Go(func() error {
+ s.expiredLocksCallback(egCtx, epoch, expired)
+ return egCtx.Err()
+ })
+ }
+
+ return nil
+ })
+
+ if err = errGroup.Wait(); err != nil {
+ s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
+ }
}
-func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, typeCond func(object.Type) bool) ([]oid.Address, error) {
+func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFound func(*meta.ExpiredObject)) error {
s.m.RLock()
defer s.m.RUnlock()
if s.info.Mode.NoMetabase() {
- return nil, ErrDegradedMode
+ return ErrDegradedMode
}
- var expired []oid.Address
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
- err := s.metaBase.IterateExpired(epoch, func(expiredObject *meta.ExpiredObject) error {
+ err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
select {
case <-ctx.Done():
return meta.ErrInterruptIterator
default:
- if typeCond(expiredObject.Type()) {
- expired = append(expired, expiredObject.Address())
- }
+ onExpiredFound(expiredObject)
return nil
}
})
+ if err != nil {
+ return err
+ }
+ return ctx.Err()
+}
+
+func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
return nil, err
}
- return expired, ctx.Err()
+ defer release()
+
+ return s.metaBase.FilterExpired(ctx, epoch, addresses)
}
// HandleExpiredTombstones marks tombstones themselves as garbage
// and clears up corresponding graveyard records.
//
// Does not modify tss.
-func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
- if s.GetMode().NoMetabase() {
+func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
return
}
- // Mark tombstones as garbage.
- var pInhume meta.InhumePrm
-
- tsAddrs := make([]oid.Address, 0, len(tss))
- for _, ts := range tss {
- tsAddrs = append(tsAddrs, ts.Tombstone())
- }
-
- pInhume.SetGCMark()
- pInhume.SetAddresses(tsAddrs...)
-
- // inhume tombstones
- res, err := s.metaBase.Inhume(pInhume)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn("could not mark tombstones as garbage",
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
+ return
+ }
+ res, err := s.metaBase.InhumeTombstones(ctx, tss)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
return
}
- s.decObjectCounterBy(logical, res.AvailableInhumed())
+ s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeTombstone)
+ s.decObjectCounterBy(logical, res.LogicInhumed())
+ s.decObjectCounterBy(user, res.UserInhumed())
+ s.decContainerObjectCounter(res.InhumedByCnrID())
i := 0
for i < res.GetDeletionInfoLength() {
@@ -421,45 +665,50 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size))
i++
}
-
- // drop just processed expired tombstones
- // from graveyard
- err = s.metaBase.DropGraves(tss)
- if err != nil {
- s.log.Warn("could not drop expired grave records", zap.Error(err))
- }
}
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
-func (s *Shard) HandleExpiredLocks(lockers []oid.Address) {
- if s.GetMode().NoMetabase() {
+func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
return
}
- err := s.metaBase.FreeLockedBy(lockers)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn("failure to unlock objects",
- zap.String("error", err.Error()),
- )
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ return
+ }
+ unlocked, err := s.metaBase.FreeLockedBy(lockers)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
var pInhume meta.InhumePrm
pInhume.SetAddresses(lockers...)
- pInhume.SetGCMark()
-
- res, err := s.metaBase.Inhume(pInhume)
+ pInhume.SetForceGCMark()
+ release, err = s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn("failure to mark lockers as garbage",
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
+ return
+ }
+ res, err := s.metaBase.Inhume(ctx, pInhume)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
return
}
- s.decObjectCounterBy(logical, res.AvailableInhumed())
+ s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeLock)
+ s.decObjectCounterBy(logical, res.LogicInhumed())
+ s.decObjectCounterBy(user, res.UserInhumed())
+ s.decContainerObjectCounter(res.InhumedByCnrID())
i := 0
for i < res.GetDeletionInfoLength() {
@@ -467,25 +716,94 @@ func (s *Shard) HandleExpiredLocks(lockers []oid.Address) {
s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size))
i++
}
+
+ s.inhumeUnlockedIfExpired(ctx, epoch, unlocked)
+}
+
+func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
+ expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+ return
+ }
+
+ if len(expiredUnlocked) == 0 {
+ return
+ }
+
+ s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked)
}
// HandleDeletedLocks unlocks all objects which were locked by lockers.
-func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
- if s.GetMode().NoMetabase() {
+func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
return
}
- err := s.metaBase.FreeLockedBy(lockers)
+ release, err := s.opsLimiter.WriteRequest(ctx)
if err != nil {
- s.log.Warn("failure to unlock objects",
- zap.String("error", err.Error()),
- )
-
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ return
+ }
+ _, err = s.metaBase.FreeLockedBy(lockers)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
return
}
}
-// NotificationChannel returns channel for shard events.
-func (s *Shard) NotificationChannel() chan<- Event {
- return s.gc.eventChan
+// NotificationChannel returns channel for new epoch events.
+func (s *Shard) NotificationChannel() chan<- uint64 {
+ return s.gc.newEpochChan
+}
+
+func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
+ defer span.End()
+
+ s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
+ defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
+
+ s.collectExpiredContainerSizeMetrics(ctx, epoch)
+ s.collectExpiredContainerCountMetrics(ctx, epoch)
+}
+
+func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ ids, err := s.metaBase.ZeroSizeContainers(ctx)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ if len(ids) == 0 {
+ return
+ }
+ s.zeroSizeContainersCallback(ctx, ids)
+}
+
+func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ ids, err := s.metaBase.ZeroCountContainers(ctx)
+ release()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+ return
+ }
+ if len(ids) == 0 {
+ return
+ }
+ s.zeroCountContainersCallback(ctx, ids)
}
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
new file mode 100644
index 0000000000..54d2f1510d
--- /dev/null
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -0,0 +1,142 @@
+package shard
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+ "time"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/panjf2000/ants/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
+ t.Parallel()
+
+ rootPath := t.TempDir()
+
+ var sh *Shard
+ l := test.NewLogger(t)
+ blobOpts := []blobstor.Option{
+ blobstor.WithLogger(test.NewLogger(t)),
+ blobstor.WithStorages([]blobstor.SubStorage{
+ {
+ Storage: blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
+ blobovniczatree.WithBlobovniczaShallowDepth(1),
+ blobovniczatree.WithBlobovniczaShallowWidth(1)),
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
+ return len(data) <= 1<<20
+ },
+ },
+ {
+ Storage: fstree.New(
+ fstree.WithPath(filepath.Join(rootPath, "blob"))),
+ },
+ }),
+ }
+
+ opts := []Option{
+ WithID(NewIDFromBytes([]byte{})),
+ WithLogger(l),
+ WithBlobStorOptions(blobOpts...),
+ WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(rootPath, "meta")),
+ meta.WithEpochState(epochState{}),
+ ),
+ WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
+ }),
+ WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
+ sh.HandleExpiredLocks(ctx, epoch, a)
+ }),
+ WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
+ return pool
+ }),
+ WithGCRemoverSleepInterval(1 * time.Second),
+ WithDisabledGC(),
+ }
+
+ sh = New(opts...)
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+ obj := testutil.GenerateObjectWithCID(cnr)
+ objID, _ := obj.ID()
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(objID)
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj)
+
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ var getPrm GetPrm
+ getPrm.SetAddress(objectCore.AddressOf(obj))
+ _, err = sh.Get(context.Background(), getPrm)
+ require.NoError(t, err, "failed to get")
+
+ // inhume
+ var inhumePrm InhumePrm
+ inhumePrm.MarkAsGarbage(addr)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err, "failed to inhume")
+ _, err = sh.Get(context.Background(), getPrm)
+ require.Error(t, err, "get returned error")
+ require.True(t, client.IsErrObjectNotFound(err), "invalid error type")
+
+ // storageID
+ var metaStIDPrm meta.StorageIDPrm
+ metaStIDPrm.SetAddress(addr)
+ storageID, err := sh.metaBase.StorageID(context.Background(), metaStIDPrm)
+ require.NoError(t, err, "failed to get storage ID")
+
+ // check existence in blobstore
+ var bsExisted common.ExistsPrm
+ bsExisted.Address = addr
+ bsExisted.StorageID = storageID.StorageID()
+ exRes, err := sh.blobStor.Exists(context.Background(), bsExisted)
+ require.NoError(t, err, "failed to check blobstore existence")
+ require.True(t, exRes.Exists, "invalid blobstore existence result")
+
+ // drop from blobstor
+ var bsDeletePrm common.DeletePrm
+ bsDeletePrm.Address = addr
+ bsDeletePrm.StorageID = storageID.StorageID()
+ _, err = sh.blobStor.Delete(context.Background(), bsDeletePrm)
+ require.NoError(t, err, "failed to delete from blobstore")
+
+ // check existence in blobstore
+ exRes, err = sh.blobStor.Exists(context.Background(), bsExisted)
+ require.NoError(t, err, "failed to check blobstore existence")
+ require.False(t, exRes.Exists, "invalid blobstore existence result")
+
+ // get should return object not found
+ _, err = sh.Get(context.Background(), getPrm)
+ require.Error(t, err, "get returned no error")
+ require.True(t, client.IsErrObjectNotFound(err), "invalid error type")
+}
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
new file mode 100644
index 0000000000..f512a488ad
--- /dev/null
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -0,0 +1,295 @@
+package shard
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
+ t.Parallel()
+
+ epoch := &epochState{
+ Value: 100,
+ }
+
+ sh := newCustomShard(t, false, shardOptions{
+ metaOptions: []meta.Option{meta.WithEpochState(epoch)},
+ additionalShardOptions: []Option{WithGCWorkerPoolInitializer(func(int) util.WorkerPool {
+ return util.NewPseudoWorkerPool() // synchronous event processing
+ })},
+ })
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
+
+ cnr := cidtest.ID()
+
+ var objExpirationAttr objectSDK.Attribute
+ objExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch)
+ objExpirationAttr.SetValue("101")
+
+ obj := testutil.GenerateObjectWithCID(cnr)
+ obj.SetAttributes(objExpirationAttr)
+ objID, _ := obj.ID()
+
+ var lockExpirationAttr objectSDK.Attribute
+ lockExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch)
+ lockExpirationAttr.SetValue("103")
+
+ lock := testutil.GenerateObjectWithCID(cnr)
+ lock.SetType(objectSDK.TypeLock)
+ lock.SetAttributes(lockExpirationAttr)
+ lockID, _ := lock.ID()
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj)
+
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
+ require.NoError(t, err)
+
+ putPrm.SetObject(lock)
+ _, err = sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ epoch.Value = 105
+ sh.gc.handleEvent(context.Background(), epoch.Value)
+
+ var getPrm GetPrm
+ getPrm.SetAddress(objectCore.AddressOf(obj))
+ _, err = sh.Get(context.Background(), getPrm)
+ require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired object must be deleted")
+}
+
+func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
+ t.Parallel()
+
+ epoch := &epochState{
+ Value: 100,
+ }
+
+ cnr := cidtest.ID()
+ parentID := oidtest.ID()
+ splitID := objectSDK.NewSplitID()
+
+ var objExpirationAttr objectSDK.Attribute
+ objExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch)
+ objExpirationAttr.SetValue("101")
+
+ var lockExpirationAttr objectSDK.Attribute
+ lockExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch)
+ lockExpirationAttr.SetValue("103")
+
+ parent := testutil.GenerateObjectWithCID(cnr)
+ parent.SetID(parentID)
+ parent.SetPayload(nil)
+ parent.SetAttributes(objExpirationAttr)
+
+ const childCount = 10
+ children := make([]*objectSDK.Object, childCount)
+ childIDs := make([]oid.ID, childCount)
+ for i := range children {
+ children[i] = testutil.GenerateObjectWithCID(cnr)
+ if i != 0 {
+ children[i].SetPreviousID(childIDs[i-1])
+ }
+ if i == len(children)-1 {
+ children[i].SetParent(parent)
+ }
+ children[i].SetSplitID(splitID)
+ children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)})
+ childIDs[i], _ = children[i].ID()
+ }
+
+ link := testutil.GenerateObjectWithCID(cnr)
+ link.SetParent(parent)
+ link.SetParentID(parentID)
+ link.SetSplitID(splitID)
+ link.SetChildren(childIDs...)
+
+ linkID, _ := link.ID()
+
+ sh := newCustomShard(t, false, shardOptions{
+ metaOptions: []meta.Option{meta.WithEpochState(epoch)},
+ additionalShardOptions: []Option{WithGCWorkerPoolInitializer(func(int) util.WorkerPool {
+ return util.NewPseudoWorkerPool() // synchronous event processing
+ })},
+ })
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
+
+ lock := testutil.GenerateObjectWithCID(cnr)
+ lock.SetType(objectSDK.TypeLock)
+ lock.SetAttributes(lockExpirationAttr)
+ lockID, _ := lock.ID()
+
+ var putPrm PutPrm
+
+ for _, child := range children {
+ putPrm.SetObject(child)
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+
+ putPrm.SetObject(link)
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ err = sh.Lock(context.Background(), cnr, lockID, append(childIDs, parentID, linkID))
+ require.NoError(t, err)
+
+ putPrm.SetObject(lock)
+ _, err = sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ var getPrm GetPrm
+ getPrm.SetAddress(objectCore.AddressOf(parent))
+
+ _, err = sh.Get(context.Background(), getPrm)
+ var splitInfoError *objectSDK.SplitInfoError
+ require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
+
+ epoch.Value = 105
+ sh.gc.handleEvent(context.Background(), epoch.Value)
+
+ _, err = sh.Get(context.Background(), getPrm)
+ require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")
+}
+
+func TestGCDropsObjectInhumedFromWritecache(t *testing.T) {
+ t.Parallel()
+
+ t.Run("flush write-cache before inhume", func(t *testing.T) {
+ t.Parallel()
+ testGCDropsObjectInhumedFromWritecache(t, true)
+ })
+
+ t.Run("don't flush write-cache before inhume", func(t *testing.T) {
+ t.Parallel()
+ testGCDropsObjectInhumedFromWritecache(t, false)
+ })
+}
+
+func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushBeforeInhume bool) {
+ sh := newCustomShard(t, true, shardOptions{
+ additionalShardOptions: []Option{WithDisabledGC()},
+ wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
+ })
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
+
+ obj := testutil.GenerateObjectWithSize(1024)
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj)
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ // writecache stores object
+ wcObj, err := sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
+ require.NoError(t, err)
+ require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj))
+
+ // blobstore doesn't store object
+ bsRes, err := sh.blobStor.Get(context.Background(), common.GetPrm{
+ Address: objectCore.AddressOf(obj),
+ })
+ require.ErrorAs(t, err, new(*apistatus.ObjectNotFound))
+ require.Nil(t, bsRes.Object)
+ require.Nil(t, bsRes.RawData)
+
+ if flushBeforeInhume {
+ sh.writeCache.Flush(context.Background(), false, false)
+ }
+
+ var inhumePrm InhumePrm
+ inhumePrm.MarkAsGarbage(objectCore.AddressOf(obj))
+ _, err = sh.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+
+ // writecache doesn't store object
+ wcObj, err = sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
+ require.Error(t, err)
+ require.Nil(t, wcObj)
+
+ if flushBeforeInhume {
+ // blobstore stores object
+ bsRes, err = sh.blobStor.Get(context.Background(), common.GetPrm{
+ Address: objectCore.AddressOf(obj),
+ })
+ require.NoError(t, err)
+ require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(bsRes.Object))
+ } else {
+ // blobstore doesn't store object
+ bsRes, err = sh.blobStor.Get(context.Background(), common.GetPrm{
+ Address: objectCore.AddressOf(obj),
+ })
+ require.ErrorAs(t, err, new(*apistatus.ObjectNotFound))
+ require.Nil(t, bsRes.Object)
+ require.Nil(t, bsRes.RawData)
+ }
+
+ gcRes := sh.removeGarbage(context.Background())
+ require.True(t, gcRes.success)
+ require.Equal(t, uint64(1), gcRes.deleted)
+}
+
+func TestGCDontDeleteObjectFromWritecache(t *testing.T) {
+ sh := newCustomShard(t, true, shardOptions{
+ additionalShardOptions: []Option{WithDisabledGC()},
+ wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
+ })
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
+
+ obj := testutil.GenerateObjectWithSize(1024)
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj)
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ // writecache stores object
+ wcObj, err := sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
+ require.NoError(t, err)
+ require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj))
+
+ // blobstore doesn't store object
+ bsRes, err := sh.blobStor.Get(context.Background(), common.GetPrm{
+ Address: objectCore.AddressOf(obj),
+ })
+ require.ErrorAs(t, err, new(*apistatus.ObjectNotFound))
+ require.Nil(t, bsRes.Object)
+ require.Nil(t, bsRes.RawData)
+
+ var metaInhumePrm meta.InhumePrm
+ metaInhumePrm.SetAddresses(objectCore.AddressOf(obj))
+ metaInhumePrm.SetLockObjectHandling()
+ metaInhumePrm.SetGCMark()
+ _, err = sh.metaBase.Inhume(context.Background(), metaInhumePrm)
+ require.NoError(t, err)
+
+ // logs: WARN shard/delete.go:98 can't remove object: object must be flushed from writecache
+ gcRes := sh.removeGarbage(context.Background())
+ require.True(t, gcRes.success)
+ require.Equal(t, uint64(0), gcRes.deleted)
+
+ // writecache stores object
+ wcObj, err = sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
+ require.NoError(t, err)
+ require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj))
+}
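
The tests above make GC deterministic through `WithGCWorkerPoolInitializer`: the supplied pool runs every submitted task inline, so `sh.gc.handleEvent` finishes all deletions before returning and the assertions that follow need no polling. A minimal sketch of such a pool, assuming `util.WorkerPool` exposes `Submit(func()) error` and `Release()`:

```go
package shard

// inlinePool is a sketch of a pseudo worker pool: Submit executes the
// task on the calling goroutine instead of dispatching it to a worker,
// which turns asynchronous GC event handling into a synchronous call.
type inlinePool struct{}

func (inlinePool) Submit(task func()) error {
	task()
	return nil
}

func (inlinePool) Release() {}
```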
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 4231c01dbd..28f8912be7 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -1,16 +1,22 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -20,8 +26,9 @@ type storFetcher = func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object,
// GetPrm groups the parameters of Get operation.
type GetPrm struct {
- addr oid.Address
- skipMeta bool
+ addr oid.Address
+ skipMeta bool
+ skipEvacCheck bool
}
// GetRes groups the resulting values of Get operation.
@@ -43,6 +50,11 @@ func (p *GetPrm) SetIgnoreMeta(ignore bool) {
p.skipMeta = ignore
}
+// SkipEvacCheck is a Get option that makes Get skip the check whether shard evacuation is in progress.
+func (p *GetPrm) SkipEvacCheck(val bool) {
+ p.skipEvacCheck = val
+}
+
// Object returns the requested object.
func (r GetRes) Object() *objectSDK.Object {
return r.obj
@@ -61,16 +73,33 @@ func (r GetRes) HasMeta() bool {
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in shard.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) Get(prm GetPrm) (GetRes, error) {
+// Returns ErrShardDisabled if the shard is disabled.
+func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Get",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("skip_meta", prm.skipMeta),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
+ if s.info.Mode.Disabled() {
+ return GetRes{}, ErrShardDisabled
+ }
+
+ if s.info.EvacuationInProgress && !prm.skipEvacCheck {
+ return GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) {
var getPrm common.GetPrm
getPrm.Address = prm.addr
getPrm.StorageID = id
- res, err := stor.Get(getPrm)
+ res, err := stor.Get(ctx, getPrm)
if err != nil {
return nil, err
}
@@ -79,11 +108,17 @@ func (s *Shard) Get(prm GetPrm) (GetRes, error) {
}
wc := func(c writecache.Cache) (*objectSDK.Object, error) {
- return c.Get(prm.addr)
+ return c.Get(ctx, prm.addr)
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return GetRes{}, err
+ }
+ defer release()
+
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
- obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
+ obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return GetRes{
obj: obj,
@@ -96,7 +131,7 @@ func (s *Shard) Get(prm GetPrm) (GetRes, error) {
var emptyStorageID = make([]byte, 0)
// fetchObjectData looks through writeCache and blobStor to find object.
-func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
+func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
var (
mErr error
mRes meta.ExistsRes
@@ -105,16 +140,16 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
if !skipMeta {
var mPrm meta.ExistsPrm
mPrm.SetAddress(addr)
- mRes, mErr = s.metaBase.Exists(mPrm)
+ mRes, mErr = s.metaBase.Exists(ctx, mPrm)
if mErr != nil && !s.info.Mode.NoMetabase() {
return nil, false, mErr
}
if !mRes.Exists() {
- return nil, false, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
} else {
- s.log.Warn("fetching object without meta", zap.Stringer("addr", addr))
+ s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@@ -122,12 +157,12 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
if err == nil || IsErrOutOfRange(err) {
return res, false, err
}
- if IsErrNotFound(err) {
- s.log.Debug("object is missing in write-cache",
+ if client.IsErrObjectNotFound(err) {
+ s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
zap.Bool("skip_meta", skipMeta))
} else {
- s.log.Error("failed to fetch object from write-cache",
+ s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
zap.Bool("skip_meta", skipMeta))
@@ -141,9 +176,9 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
var mPrm meta.StorageIDPrm
mPrm.SetAddress(addr)
- mExRes, err := s.metaBase.StorageID(mPrm)
+ mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
- return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
+ return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
}
storageID := mExRes.StorageID()
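
From a caller's perspective, the reworked `Get` is context-aware and can bypass the evacuation check added above. A minimal usage sketch; the `example` package and helper name are illustrative, and `sh` is assumed to be an opened, initialized shard:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// getIgnoringEvacuation reads an object even while the shard is being
// evacuated; without SkipEvacCheck(true), Get would report ObjectNotFound.
func getIgnoringEvacuation(ctx context.Context, sh *shard.Shard, addr oid.Address) (*objectSDK.Object, error) {
	var prm shard.GetPrm
	prm.SetAddress(addr)
	prm.SkipEvacCheck(true)

	res, err := sh.Get(ctx, prm)
	if err != nil {
		return nil, err
	}
	return res.Object(), nil
}
```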
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 5e1914a065..837991b734 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -1,13 +1,13 @@
-package shard_test
+package shard
import (
"bytes"
+ "context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -15,87 +15,91 @@ import (
)
func TestShard_Get(t *testing.T) {
+ t.Parallel()
+
t.Run("without write cache", func(t *testing.T) {
+ t.Parallel()
testShardGet(t, false)
})
t.Run("with write cache", func(t *testing.T) {
+ t.Parallel()
testShardGet(t, true)
})
}
func testShardGet(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer releaseShard(sh, t)
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
- var putPrm shard.PutPrm
- var getPrm shard.GetPrm
+ var putPrm PutPrm
+ var getPrm GetPrm
t.Run("small object", func(t *testing.T) {
- obj := generateObject(t)
- addAttribute(obj, "foo", "bar")
- addPayload(obj, 1<<5)
+ obj := testutil.GenerateObject()
+ testutil.AddAttribute(obj, "foo", "bar")
+ testutil.AddPayload(obj, 1<<5)
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
t.Run("big object", func(t *testing.T) {
- obj := generateObject(t)
- addAttribute(obj, "foo", "bar")
+ obj := testutil.GenerateObject()
+ testutil.AddAttribute(obj, "foo", "bar")
obj.SetID(oidtest.ID())
- addPayload(obj, 1<<20) // big obj
+ testutil.AddPayload(obj, 1<<20) // big obj
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
t.Run("parent object", func(t *testing.T) {
- obj := generateObject(t)
- addAttribute(obj, "foo", "bar")
+ obj := testutil.GenerateObject()
+ testutil.AddAttribute(obj, "foo", "bar")
cnr := cidtest.ID()
splitID := objectSDK.NewSplitID()
- parent := generateObjectWithCID(t, cnr)
- addAttribute(parent, "parent", "attribute")
+ parent := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(parent, "parent", "attribute")
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
child.SetSplitID(splitID)
- addPayload(child, 1<<5)
+ testutil.AddPayload(child, 1<<5)
putPrm.SetObject(child)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(child))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.True(t, binaryEqual(child, res.Object()))
getPrm.SetAddress(object.AddressOf(parent))
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
var si *objectSDK.SplitInfoError
require.True(t, errors.As(err, &si))
@@ -109,19 +113,6 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
})
}
-func testGet(t *testing.T, sh *shard.Shard, getPrm shard.GetPrm, hasWriteCache bool) (shard.GetRes, error) {
- res, err := sh.Get(getPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if shard.IsErrNotFound(err) {
- res, err = sh.Get(getPrm)
- }
- return !shard.IsErrNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
-
// binaryEqual is used when the object contains empty lists in its structure
// and require.Equal fails when comparing nil and []{} lists.
func binaryEqual(a, b *objectSDK.Object) bool {
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 6913d3162d..34b8290d64 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -1,15 +1,23 @@
package shard
import (
+ "context"
+
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// HeadPrm groups the parameters of Head operation.
type HeadPrm struct {
- addr oid.Address
- raw bool
+ addr oid.Address
+ raw bool
+ ShardLooksBad bool
}
// HeadRes groups the resulting values of Head operation.
@@ -43,24 +51,44 @@ func (r HeadRes) Object() *objectSDK.Object {
// Returns an error of type apistatus.ObjectNotFound if object is missing in Shard.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) Head(prm HeadPrm) (HeadRes, error) {
+func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Head",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("raw", prm.raw),
+ ))
+ defer span.End()
+
var obj *objectSDK.Object
var err error
- if s.GetMode().NoMetabase() {
+ mode := s.GetMode()
+ if mode.NoMetabase() || (mode.ReadOnly() && prm.ShardLooksBad) {
var getPrm GetPrm
getPrm.SetAddress(prm.addr)
getPrm.SetIgnoreMeta(true)
var res GetRes
- res, err = s.Get(getPrm)
+ res, err = s.Get(ctx, getPrm)
obj = res.Object()
} else {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ if s.info.EvacuationInProgress {
+ return HeadRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
var headParams meta.GetPrm
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
+ release, limitErr := s.opsLimiter.ReadRequest(ctx)
+ if limitErr != nil {
+ return HeadRes{}, limitErr
+ }
+ defer release()
+
var res meta.GetRes
- res, err = s.metaBase.Get(headParams)
+ res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
}
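
The new branch means `Head` is served from the metabase in the common case, but falls back to a meta-ignoring `Get` when the metabase is unavailable or the shard looks bad in read-only mode; either way the caller receives only a header. A hedged usage sketch (the `example` package and helper are illustrative; the raw flag surfaces split info instead of assembling the parent):

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// headRaw fetches an object header without parent assembly: for a split
// object this yields *objectSDK.SplitInfoError rather than the parent header.
func headRaw(ctx context.Context, sh *shard.Shard, addr oid.Address) (*objectSDK.Object, error) {
	var prm shard.HeadPrm
	prm.SetAddress(addr)
	prm.SetRaw(true)

	res, err := sh.Head(ctx, prm)
	if err != nil {
		return nil, err
	}
	return res.Object(), nil
}
```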
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index a0862bd014..deb3019df0 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -1,46 +1,50 @@
-package shard_test
+package shard
import (
+ "context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
func TestShard_Head(t *testing.T) {
+ t.Parallel()
+
t.Run("without write cache", func(t *testing.T) {
+ t.Parallel()
testShardHead(t, false)
})
t.Run("with write cache", func(t *testing.T) {
+ t.Parallel()
testShardHead(t, true)
})
}
func testShardHead(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer releaseShard(sh, t)
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
- var putPrm shard.PutPrm
- var headPrm shard.HeadPrm
+ var putPrm PutPrm
+ var headPrm HeadPrm
t.Run("regular object", func(t *testing.T) {
- obj := generateObject(t)
- addAttribute(obj, "foo", "bar")
+ obj := testutil.GenerateObject()
+ testutil.AddAttribute(obj, "foo", "bar")
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(obj))
- res, err := testHead(t, sh, headPrm, hasWriteCache)
+ res, err := sh.Head(context.Background(), headPrm)
require.NoError(t, err)
require.Equal(t, obj.CutPayload(), res.Object())
})
@@ -49,10 +53,10 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
cnr := cidtest.ID()
splitID := objectSDK.NewSplitID()
- parent := generateObjectWithCID(t, cnr)
- addAttribute(parent, "foo", "bar")
+ parent := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(parent, "foo", "bar")
- child := generateObjectWithCID(t, cnr)
+ child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
@@ -60,7 +64,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(child)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(parent))
@@ -68,27 +72,14 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
var siErr *objectSDK.SplitInfoError
- _, err = testHead(t, sh, headPrm, hasWriteCache)
+ _, err = sh.Head(context.Background(), headPrm)
require.True(t, errors.As(err, &siErr))
headPrm.SetAddress(object.AddressOf(parent))
headPrm.SetRaw(false)
- head, err := sh.Head(headPrm)
+ head, err := sh.Head(context.Background(), headPrm)
require.NoError(t, err)
require.Equal(t, parent.CutPayload(), head.Object())
})
}
-
-func testHead(t *testing.T, sh *shard.Shard, headPrm shard.HeadPrm, hasWriteCache bool) (shard.HeadRes, error) {
- res, err := sh.Head(headPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if shard.IsErrNotFound(err) {
- res, err = sh.Head(headPrm)
- }
- return !shard.IsErrNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 992a86c019..7391adef2e 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -1,7 +1,11 @@
package shard
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "context"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/mr-tron/base58"
"go.uber.org/zap"
)
@@ -27,37 +31,42 @@ func (s *Shard) ID() *ID {
}
// UpdateID reads shard ID saved in the metabase and updates it if it is missing.
-func (s *Shard) UpdateID() (err error) {
- if err = s.metaBase.Open(false); err != nil {
- return err
- }
- defer func() {
- cErr := s.metaBase.Close()
- if err == nil {
- err = cErr
- }
- }()
- id, err := s.metaBase.ReadShardID()
- if err != nil {
- return err
- }
- if len(id) != 0 {
- s.info.ID = NewIDFromBytes(id)
-
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.SetShardID(s.info.ID.String())
+func (s *Shard) UpdateID(ctx context.Context) (err error) {
+ var idFromMetabase []byte
+ modeDegraded := s.GetMode().NoMetabase()
+ if !modeDegraded {
+ if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
+ err = fmt.Errorf("read shard id from metabase: %w", err)
}
}
- s.log = &logger.Logger{Logger: s.log.With(zap.String("shard_id", s.info.ID.String()))}
+ if len(idFromMetabase) != 0 {
+ s.info.ID = NewIDFromBytes(idFromMetabase)
+ }
+
+ shardID := s.info.ID.String()
+ s.metricsWriter.SetShardID(shardID)
+ if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
+ s.writeCache.GetMetrics().SetShardID(shardID)
+ }
+
+ s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
s.metaBase.SetLogger(s.log)
s.blobStor.SetLogger(s.log)
if s.hasWriteCache() {
s.writeCache.SetLogger(s.log)
}
-
- if len(id) != 0 {
- return nil
+ s.metaBase.SetParentID(s.info.ID.String())
+ s.blobStor.SetParentID(s.info.ID.String())
+ if s.pilorama != nil {
+ s.pilorama.SetParentID(s.info.ID.String())
}
- return s.metaBase.WriteShardID(*s.info.ID)
+ s.opsLimiter.SetParentID(s.info.ID.String())
+
+ if len(idFromMetabase) == 0 && !modeDegraded {
+ if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
+ err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
+ }
+ }
+ return
}
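
The control flow above is worth spelling out: a metabase read failure no longer aborts the update, the ID is still propagated to metrics, loggers, and child components, and a later write failure is attached to the read error with `errors.Join`. The pattern in isolation, as a hedged sketch (helper names are illustrative):

```go
package example

import (
	"errors"
	"fmt"
)

// readThenWrite mirrors UpdateID's error handling: a failed read does not
// stop the ID from being applied, and a failed write is joined with the
// read error so the caller sees both.
func readThenWrite(read func() error, apply func(), write func() error) error {
	var err error
	if rErr := read(); rErr != nil {
		err = fmt.Errorf("read shard id from metabase: %w", rErr)
	}
	apply() // propagate the ID to metrics, loggers and components regardless
	if wErr := write(); wErr != nil {
		err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", wErr))
	}
	return err
}
```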
diff --git a/pkg/local_object_storage/shard/info.go b/pkg/local_object_storage/shard/info.go
index a5b9c1a7d1..f01796ec74 100644
--- a/pkg/local_object_storage/shard/info.go
+++ b/pkg/local_object_storage/shard/info.go
@@ -16,6 +16,9 @@ type Info struct {
// Shard mode.
Mode mode.Mode
+ // True when evacuation is in progress.
+ EvacuationInProgress bool
+
// Information about the metabase.
MetaBaseInfo meta.Info
@@ -25,9 +28,6 @@ type Info struct {
// Information about the Write Cache.
WriteCacheInfo writecache.Info
- // Weight parameters of the shard.
- WeightValues WeightValues
-
// ErrorCount contains amount of errors occurred in shard operations.
ErrorCount uint32
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 199bb8b3f7..c0fd65f4b6 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -5,8 +5,12 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -60,7 +64,13 @@ var ErrLockObjectRemoval = meta.ErrLockObjectRemoval
// if at least one object is locked.
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
-func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
+func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Inhume",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ ))
+ defer span.End()
+
s.m.RLock()
if s.info.Mode.ReadOnly() {
@@ -71,9 +81,15 @@ func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return InhumeRes{}, err
+ }
+ defer release()
+
if s.hasWriteCache() {
for i := range prm.target {
- _ = s.writeCache.Delete(prm.target[i])
+ _ = s.writeCache.Delete(ctx, prm.target[i])
}
}
@@ -91,15 +107,15 @@ func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
metaPrm.SetForceGCMark()
}
- res, err := s.metaBase.Inhume(metaPrm)
+ res, err := s.metaBase.Inhume(ctx, metaPrm)
if err != nil {
if errors.Is(err, meta.ErrLockObjectRemoval) {
s.m.RUnlock()
return InhumeRes{}, ErrLockObjectRemoval
}
- s.log.Debug("could not mark object to delete in metabase",
- zap.String("error", err.Error()),
+ s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+ zap.Error(err),
)
s.m.RUnlock()
@@ -109,7 +125,9 @@ func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
s.m.RUnlock()
- s.decObjectCounterBy(logical, res.AvailableInhumed())
+ s.decObjectCounterBy(logical, res.LogicInhumed())
+ s.decObjectCounterBy(user, res.UserInhumed())
+ s.decContainerObjectCounter(res.InhumedByCnrID())
i := 0
for i < res.GetDeletionInfoLength() {
@@ -119,7 +137,7 @@ func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
}
if deletedLockObjs := res.DeletedLockObjects(); len(deletedLockObjs) != 0 {
- s.deletedLockCallBack(context.Background(), deletedLockObjs)
+ s.deletedLockCallBack(ctx, deletedLockObjs)
}
return InhumeRes{}, nil
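
For reference, the two inhume flavors from the caller's side: burying under a tombstone versus merely flagging for GC. A short sketch under the usual assumptions (illustrative `example` package, initialized shard):

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// inhumeWithTombstone buries obj under the tombstone address ts,
// so reads report ObjectAlreadyRemoved afterwards.
func inhumeWithTombstone(ctx context.Context, sh *shard.Shard, ts, obj oid.Address) error {
	var prm shard.InhumePrm
	prm.SetTarget(ts, obj)
	_, err := sh.Inhume(ctx, prm)
	return err
}

// markGarbage only flags obj for the shard GC, without a tombstone.
func markGarbage(ctx context.Context, sh *shard.Shard, obj oid.Address) error {
	var prm shard.InhumePrm
	prm.MarkAsGarbage(obj)
	_, err := sh.Inhume(ctx, prm)
	return err
}
```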
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 8f673c7a8d..1421f0e188 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -1,54 +1,59 @@
-package shard_test
+package shard
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
)
func TestShard_Inhume(t *testing.T) {
+ t.Parallel()
+
t.Run("without write cache", func(t *testing.T) {
+ t.Parallel()
testShardInhume(t, false)
})
t.Run("with write cache", func(t *testing.T) {
+ t.Parallel()
testShardInhume(t, true)
})
}
func testShardInhume(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer releaseShard(sh, t)
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
- obj := generateObjectWithCID(t, cnr)
- addAttribute(obj, "foo", "bar")
+ obj := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj, "foo", "bar")
- ts := generateObjectWithCID(t, cnr)
+ ts := testutil.GenerateObjectWithCID(cnr)
- var putPrm shard.PutPrm
+ var putPrm PutPrm
putPrm.SetObject(obj)
- var inhPrm shard.InhumePrm
+ var inhPrm InhumePrm
inhPrm.SetTarget(object.AddressOf(ts), object.AddressOf(obj))
- var getPrm shard.GetPrm
+ var getPrm GetPrm
getPrm.SetAddress(object.AddressOf(obj))
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
- _, err = sh.Inhume(inhPrm)
+ _, err = sh.Inhume(context.Background(), inhPrm)
require.NoError(t, err)
- _, err = sh.Get(getPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ _, err = sh.Get(context.Background(), getPrm)
+ require.True(t, client.IsErrObjectAlreadyRemoved(err))
}
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 9efca8983c..af87981cad 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -1,12 +1,17 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -28,6 +33,30 @@ func (r ListContainersRes) Containers() []cid.ID {
return r.containers
}
+// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler is executed for each container in the metabase.
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // ObjectType is the type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID is the container whose objects are iterated over.
+ ContainerID cid.ID
+ // Handler is executed for each object in the metabase.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
+// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType is the type of objects to count.
+ ObjectType objectSDK.Type
+ // ContainerID is the container whose objects are counted.
+ ContainerID cid.ID
+}
+
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -36,7 +65,7 @@ type ListWithCursorPrm struct {
// ListWithCursorRes contains values returned from ListWithCursor operation.
type ListWithCursorRes struct {
- addrList []objectcore.AddressWithType
+ addrList []objectcore.Info
cursor *Cursor
}
@@ -53,7 +82,7 @@ func (p *ListWithCursorPrm) WithCursor(cursor *Cursor) {
}
// AddressList returns addresses selected by ListWithCursor operation.
-func (r ListWithCursorRes) AddressList() []objectcore.AddressWithType {
+func (r ListWithCursorRes) AddressList() []objectcore.Info {
return r.addrList
}
@@ -63,7 +92,13 @@ func (r ListWithCursorRes) Cursor() *Cursor {
}
// List returns all objects physically stored in the Shard.
-func (s *Shard) List() (res SelectRes, err error) {
+func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.List",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -71,12 +106,18 @@ func (s *Shard) List() (res SelectRes, err error) {
return SelectRes{}, ErrDegradedMode
}
- lst, err := s.metaBase.Containers()
+ release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
- return res, fmt.Errorf("can't list stored containers: %w", err)
+ return SelectRes{}, err
+ }
+ defer release()
+
+ lst, err := s.metaBase.Containers(ctx)
+ if err != nil {
+ return res, fmt.Errorf("list stored containers: %w", err)
}
- filters := object.NewSearchFilters()
+ filters := objectSDK.NewSearchFilters()
filters.AddPhyFilter()
for i := range lst {
@@ -84,11 +125,11 @@ func (s *Shard) List() (res SelectRes, err error) {
sPrm.SetContainerID(lst[i])
sPrm.SetFilters(filters)
- sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase
+ sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
if err != nil {
- s.log.Debug("can't select all objects",
+ s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
- zap.String("error", err.Error()))
+ zap.Error(err))
continue
}
@@ -99,14 +140,26 @@ func (s *Shard) List() (res SelectRes, err error) {
return res, nil
}
-func (s *Shard) ListContainers(_ ListContainersPrm) (ListContainersRes, error) {
+func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListContainersRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ListContainers",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ ))
+ defer span.End()
+
if s.GetMode().NoMetabase() {
return ListContainersRes{}, ErrDegradedMode
}
- containers, err := s.metaBase.Containers()
+ release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
- return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
+ return ListContainersRes{}, err
+ }
+ defer release()
+
+ containers, err := s.metaBase.Containers(ctx)
+ if err != nil {
+ return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
}
return ListContainersRes{
@@ -120,17 +173,30 @@ func (s *Shard) ListContainers(_ ListContainersPrm) (ListContainersRes, error) {
//
// Returns ErrEndOfListing if there are no more objects to return or count
// parameter set to zero.
-func (s *Shard) ListWithCursor(prm ListWithCursorPrm) (ListWithCursorRes, error) {
+func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.ListWithCursor",
+ trace.WithAttributes(
+ attribute.Int64("count", int64(prm.count)),
+ attribute.Bool("has_cursor", prm.cursor != nil),
+ ))
+ defer span.End()
+
if s.GetMode().NoMetabase() {
return ListWithCursorRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return ListWithCursorRes{}, err
+ }
+ defer release()
+
var metaPrm meta.ListPrm
metaPrm.SetCount(prm.count)
metaPrm.SetCursor(prm.cursor)
- res, err := s.metaBase.ListWithCursor(metaPrm)
+ res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
if err != nil {
- return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
+ return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
}
return ListWithCursorRes{
@@ -138,3 +204,96 @@ func (s *Shard) ListWithCursor(prm ListWithCursorPrm) (ListWithCursorRes, error)
cursor: res.Cursor(),
}, nil
}
+
+// IterateOverContainers iterates over all physical containers present in the shard.
+func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = prm.Handler
+ err = s.metaBase.IterateOverContainers(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("iterate over containers: %w", err)
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer iterates over physical objects of the given type present in the shard for the provided container.
+func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ var metaPrm meta.IterateOverObjectsInContainerPrm
+ metaPrm.ContainerID = prm.ContainerID
+ metaPrm.ObjectType = prm.ObjectType
+ metaPrm.Handler = prm.Handler
+ err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("iterate over objects: %w", err)
+ }
+
+ return nil
+}
+
+// CountAliveObjectsInContainer counts objects of the given type in the container that are neither in the graveyard nor marked as garbage.
+func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket")
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+
+ var metaPrm meta.CountAliveObjectsInContainerPrm
+ metaPrm.ObjectType = prm.ObjectType
+ metaPrm.ContainerID = prm.ContainerID
+ count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
+ if err != nil {
+ return 0, fmt.Errorf("count alive objects in bucket: %w", err)
+ }
+
+ return count, nil
+}
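
The three new entry points compose: list the containers first, then count or walk objects per container. A hedged sketch (illustrative `example` package); container IDs are collected before counting so the second call does not run inside the first call's handler:

```go
package example

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// countPerContainer prints the number of alive regular objects for every
// container physically present in the shard.
func countPerContainer(ctx context.Context, sh *shard.Shard) error {
	var ids []cid.ID

	var itPrm shard.IterateOverContainersPrm
	itPrm.Handler = func(_ context.Context, _ objectSDK.Type, id cid.ID) error {
		ids = append(ids, id) // collect first, count after iteration finishes
		return nil
	}
	if err := sh.IterateOverContainers(ctx, itPrm); err != nil {
		return err
	}

	for _, id := range ids {
		var cntPrm shard.CountAliveObjectsInContainerPrm
		cntPrm.ObjectType = objectSDK.TypeRegular
		cntPrm.ContainerID = id

		n, err := sh.CountAliveObjectsInContainer(ctx, cntPrm)
		if err != nil {
			return err
		}
		fmt.Println(id.EncodeToString(), n)
	}
	return nil
}
```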
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index fd669ee9d4..139b2e3166 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -1,62 +1,76 @@
-package shard_test
+package shard
import (
+ "context"
+ "sync"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
)
func TestShard_List(t *testing.T) {
- sh := newShard(t, false)
- shWC := newShard(t, true)
-
- defer func() {
- releaseShard(sh, t)
- releaseShard(shWC, t)
- }()
+ t.Parallel()
t.Run("without write cache", func(t *testing.T) {
+ t.Parallel()
+ sh := newShard(t, false)
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
testShardList(t, sh)
})
t.Run("with write cache", func(t *testing.T) {
+ t.Parallel()
+ shWC := newShard(t, true)
+ defer func() { require.NoError(t, shWC.Close(context.Background())) }()
testShardList(t, shWC)
})
}
-func testShardList(t *testing.T, sh *shard.Shard) {
+func testShardList(t *testing.T, sh *Shard) {
const C = 10
const N = 5
+ var mtx sync.Mutex
objs := make(map[string]int)
- var putPrm shard.PutPrm
- for i := 0; i < C; i++ {
- cnr := cidtest.ID()
+ var errG errgroup.Group
+ errG.SetLimit(C * N)
+ for range C {
+ errG.Go(func() error {
+ cnr := cidtest.ID()
- for j := 0; j < N; j++ {
- obj := generateObjectWithCID(t, cnr)
- addPayload(obj, 1<<2)
+ for range N {
+ errG.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddPayload(obj, 1<<2)
- // add parent as virtual object, it must be ignored in List()
- parent := generateObjectWithCID(t, cnr)
- idParent, _ := parent.ID()
- obj.SetParentID(idParent)
- obj.SetParent(parent)
+ // add parent as virtual object, it must be ignored in List()
+ parent := testutil.GenerateObjectWithCID(cnr)
+ idParent, _ := parent.ID()
+ obj.SetParentID(idParent)
+ obj.SetParent(parent)
- objs[object.AddressOf(obj).EncodeToString()] = 0
+ mtx.Lock()
+ objs[object.AddressOf(obj).EncodeToString()] = 0
+ mtx.Unlock()
- putPrm.SetObject(obj)
+ var putPrm PutPrm
+ putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
- require.NoError(t, err)
- }
+ _, err := sh.Put(context.Background(), putPrm)
+ return err
+ })
+ }
+ return nil
+ })
}
+ require.NoError(t, errG.Wait())
- res, err := sh.List()
+ res, err := sh.List(context.Background())
require.NoError(t, err)
for _, objID := range res.AddressList() {
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index d8113cf305..9c392fdacf 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -1,11 +1,15 @@
package shard
import (
+ "context"
"fmt"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Lock marks objects as locked with another object. All objects from the
@@ -14,7 +18,16 @@ import (
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
-func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Lock",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", idCnr.EncodeToString()),
+ attribute.String("locker", locker.EncodeToString()),
+ attribute.Int("locked_count", len(locked)),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -25,7 +38,13 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
return ErrDegradedMode
}
- err := s.metaBase.Lock(idCnr, locker, locked)
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ err = s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -35,19 +54,56 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
// IsLocked checks object locking relation of the provided object. Not found object is
// considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
-func (s *Shard) IsLocked(addr oid.Address) (bool, error) {
+func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.IsLocked",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
m := s.GetMode()
if m.NoMetabase() {
return false, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer release()
+
var prm meta.IsLockedPrm
prm.SetAddress(addr)
- res, err := s.metaBase.IsLocked(prm)
+ res, err := s.metaBase.IsLocked(ctx, prm)
if err != nil {
return false, err
}
return res.Locked(), nil
}
+
+// GetLocks returns the IDs of lock objects that lock the provided object. An object that is
+// not found is considered not locked. Requires a healthy metabase, returns ErrDegradedMode otherwise.
+func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ m := s.GetMode()
+ if m.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+
+ return s.metaBase.GetLocks(ctx, addr)
+}
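
Putting the lock APIs together: establish the relation with `Lock`, then inspect it with `IsLocked` and the new `GetLocks`. A sketch with illustrative names:

```go
package example

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// lockAndInspect marks target as locked by locker, then queries the
// relation back through IsLocked and GetLocks.
func lockAndInspect(ctx context.Context, sh *shard.Shard, cnr cid.ID, locker oid.ID, target oid.Address) error {
	if err := sh.Lock(ctx, cnr, locker, []oid.ID{target.Object()}); err != nil {
		return err
	}

	locked, err := sh.IsLocked(ctx, target)
	if err != nil {
		return err
	}

	locks, err := sh.GetLocks(ctx, target) // IDs of the lock objects holding target
	if err != nil {
		return err
	}

	fmt.Println("locked:", locked, "by:", locks)
	return nil
}
```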
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index a16b7f5472..3878a65cd6 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -1,4 +1,4 @@
-package shard_test
+package shard
import (
"context"
@@ -9,12 +9,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -22,19 +23,24 @@ import (
)
func TestShard_Lock(t *testing.T) {
- var sh *shard.Shard
+ t.Parallel()
+
+ var sh *Shard
rootPath := t.TempDir()
- opts := []shard.Option{
- shard.WithLogger(&logger.Logger{Logger: zap.NewNop()}),
- shard.WithBlobStorOptions(
+ l := logger.NewLoggerWrapper(zap.NewNop())
+ opts := []Option{
+ WithID(NewIDFromBytes([]byte{})),
+ WithLogger(l),
+ WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(2),
blobovniczatree.WithBlobovniczaShallowWidth(2)),
- Policy: func(_ *object.Object, data []byte) bool {
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
return len(data) <= 1<<20
},
},
@@ -44,108 +50,109 @@ func TestShard_Lock(t *testing.T) {
},
}),
),
- shard.WithMetaBaseOptions(
+ WithMetaBaseOptions(
meta.WithPath(filepath.Join(rootPath, "meta")),
meta.WithEpochState(epochState{}),
),
- shard.WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
}
- sh = shard.New(opts...)
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ sh = New(opts...)
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
- t.Cleanup(func() {
- releaseShard(sh, t)
- })
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
- obj := generateObjectWithCID(t, cnr)
+ obj := testutil.GenerateObjectWithCID(cnr)
objID, _ := obj.ID()
- lock := generateObjectWithCID(t, cnr)
- lock.SetType(object.TypeLock)
+ lock := testutil.GenerateObjectWithCID(cnr)
+ lock.SetType(objectSDK.TypeLock)
lockID, _ := lock.ID()
// put the object
- var putPrm shard.PutPrm
+ var putPrm PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// lock the object
- err = sh.Lock(cnr, lockID, []oid.ID{objID})
+ err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
require.NoError(t, err)
putPrm.SetObject(lock)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
t.Run("inhuming locked objects", func(t *testing.T) {
- ts := generateObjectWithCID(t, cnr)
+ ts := testutil.GenerateObjectWithCID(cnr)
- var inhumePrm shard.InhumePrm
+ var inhumePrm InhumePrm
inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(obj))
- _, err = sh.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ var objLockedErr *apistatus.ObjectLocked
+
+ _, err = sh.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = sh.Inhume(inhumePrm)
- require.ErrorAs(t, err, new(apistatus.ObjectLocked))
+ _, err = sh.Inhume(context.Background(), inhumePrm)
+ require.ErrorAs(t, err, &objLockedErr)
})
t.Run("inhuming lock objects", func(t *testing.T) {
- ts := generateObjectWithCID(t, cnr)
+ ts := testutil.GenerateObjectWithCID(cnr)
- var inhumePrm shard.InhumePrm
+ var inhumePrm InhumePrm
inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(lock))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
})
t.Run("force objects inhuming", func(t *testing.T) {
- var inhumePrm shard.InhumePrm
+ var inhumePrm InhumePrm
inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock))
inhumePrm.ForceRemoval()
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// it should be possible to remove
// lock object now
- inhumePrm = shard.InhumePrm{}
+ inhumePrm = InhumePrm{}
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// check that object has been removed
- var getPrm shard.GetPrm
+ var getPrm GetPrm
getPrm.SetAddress(objectcore.AddressOf(obj))
- _, err = sh.Get(getPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ _, err = sh.Get(context.Background(), getPrm)
+ require.True(t, client.IsErrObjectNotFound(err))
})
}
func TestShard_IsLocked(t *testing.T) {
sh := newShard(t, false)
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
- obj := generateObjectWithCID(t, cnr)
+ obj := testutil.GenerateObjectWithCID(cnr)
cnrID, _ := obj.ContainerID()
objID, _ := obj.ID()
@@ -153,24 +160,24 @@ func TestShard_IsLocked(t *testing.T) {
// put the object
- var putPrm shard.PutPrm
+ var putPrm PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// not locked object is not locked
- locked, err := sh.IsLocked(objectcore.AddressOf(obj))
+ locked, err := sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.False(t, locked)
// locked object is locked
- require.NoError(t, sh.Lock(cnrID, lockID, []oid.ID{objID}))
+ require.NoError(t, sh.Lock(context.Background(), cnrID, lockID, []oid.ID{objID}))
- locked, err = sh.IsLocked(objectcore.AddressOf(obj))
+ locked, err = sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.True(t, locked)
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
new file mode 100644
index 0000000000..087ba42efa
--- /dev/null
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -0,0 +1,60 @@
+package shard
+
+import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+
+// MetricsWriter is an interface of a component that stores the shard's metrics.
+type MetricsWriter interface {
+ // SetObjectCounter must set object counter taking into account object type.
+ SetObjectCounter(objectType string, v uint64)
+ // AddToObjectCounter must update object counter taking into account object
+ // type.
+ // Negative parameter must decrease the counter.
+ AddToObjectCounter(objectType string, delta int)
+ // AddToContainerSize must add a value to the container size.
+ // Value can be negative.
+ AddToContainerSize(cnr string, value int64)
+ // AddToPayloadSize must add a value to the payload size.
+ // Value can be negative.
+ AddToPayloadSize(value int64)
+ // IncObjectCounter must increment shard's object counter taking into account
+ // object type.
+ IncObjectCounter(objectType string)
+ // SetShardID must set (update) the shard identifier that will be used in
+ // metrics.
+ SetShardID(id string)
+ // SetMode sets the mode of the shard.
+ SetMode(mode mode.Mode)
+ // SetContainerObjectsCount sets container object count.
+ SetContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncContainerObjectsCount increments container object count.
+ IncContainerObjectsCount(cnrID string, objectType string)
+ // SubContainerObjectsCount subtracts container object count.
+ SubContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncRefillObjectsCount increments refill objects count.
+ IncRefillObjectsCount(path string, size int, success bool)
+ // SetRefillPercent sets refill percent.
+ SetRefillPercent(path string, percent uint32)
+ // SetRefillStatus sets refill status.
+ SetRefillStatus(path string, status string)
+ // SetEvacuationInProgress sets the evacuation status.
+ SetEvacuationInProgress(value bool)
+}
+
+type noopMetrics struct{}
+
+var _ MetricsWriter = noopMetrics{}
+
+func (noopMetrics) SetObjectCounter(string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, int) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) AddToPayloadSize(int64) {}
+func (noopMetrics) IncObjectCounter(string) {}
+func (noopMetrics) SetShardID(string) {}
+func (noopMetrics) SetMode(mode.Mode) {}
+func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncContainerObjectsCount(string, string) {}
+func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string) {}
+func (noopMetrics) SetEvacuationInProgress(bool) {}
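
Since `noopMetrics` satisfies the whole interface, an in-package consumer (such as a test) can embed it and override only what it needs. A hedged sketch; the `evacMetrics` type is illustrative:

```go
package shard

import "sync/atomic"

// evacMetrics (illustrative) records only the evacuation flag and inherits
// no-op implementations for every other MetricsWriter method.
type evacMetrics struct {
	noopMetrics
	inProgress atomic.Bool
}

func (m *evacMetrics) SetEvacuationInProgress(v bool) { m.inProgress.Store(v) }

var _ MetricsWriter = (*evacMetrics)(nil)
```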
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 13d839eee3..5230dcad0b 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -1,35 +1,85 @@
-package shard_test
+package shard
import (
+ "context"
"path/filepath"
+ "sync"
"testing"
+ "time"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
type metricsStore struct {
- objCounters map[string]uint64
- cnrSize map[string]int64
- pldSize int64
- readOnly bool
+ mtx sync.Mutex
+ objCounters map[string]uint64
+ cnrSize map[string]int64
+ cnrCount map[string]uint64
+ pldSize int64
+ mode mode.Mode
+ errCounter int64
+ refillCount int64
+ refillSize int64
+ refillPercent uint32
+ refillStatus string
}
-func (m metricsStore) SetShardID(_ string) {}
+func newMetricStore() *metricsStore {
+ return &metricsStore{
+ objCounters: map[string]uint64{
+ "phy": 0,
+ "logic": 0,
+ },
+ cnrSize: make(map[string]int64),
+ cnrCount: make(map[string]uint64),
+ }
+}
-func (m metricsStore) SetObjectCounter(objectType string, v uint64) {
+func (m *metricsStore) SetShardID(_ string) {}
+
+func (m *metricsStore) SetObjectCounter(objectType string, v uint64) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
m.objCounters[objectType] = v
}
-func (m metricsStore) AddToObjectCounter(objectType string, delta int) {
+func (m *metricsStore) getObjectCounter(objectType string) uint64 {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ return m.objCounters[objectType]
+}
+
+func (m *metricsStore) containerSizes() map[string]int64 {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ r := make(map[string]int64, len(m.cnrSize))
+ for c, s := range m.cnrSize {
+ r[c] = s
+ }
+ return r
+}
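+// newRebuilder creates an idle rebuilder; the worker goroutine is launched later by Start.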
+
+func (m *metricsStore) payloadSize() int64 {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ return m.pldSize
+}
+
+func (m *metricsStore) AddToObjectCounter(objectType string, delta int) {
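+// Start launches a single background worker that executes scheduled rebuild tasks one at a time.
+// Subsequent calls are no-ops until Stop is called.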
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
switch {
case delta > 0:
m.objCounters[objectType] += uint64(delta)
@@ -46,146 +96,303 @@ func (m metricsStore) AddToObjectCounter(objectType string, delta int) {
}
}
-func (m metricsStore) IncObjectCounter(objectType string) {
+func (m *metricsStore) IncObjectCounter(objectType string) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
m.objCounters[objectType] += 1
}
-func (m metricsStore) DecObjectCounter(objectType string) {
- m.AddToObjectCounter(objectType, -1)
+func (m *metricsStore) SetMode(mode mode.Mode) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.mode = mode
}
-func (m *metricsStore) SetReadonly(r bool) {
- m.readOnly = r
-}
-
-func (m metricsStore) AddToContainerSize(cnr string, size int64) {
+func (m *metricsStore) AddToContainerSize(cnr string, size int64) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
m.cnrSize[cnr] += size
}
func (m *metricsStore) AddToPayloadSize(size int64) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
m.pldSize += size
}
-const physical = "phy"
-const logical = "logic"
-const readonly = "readonly"
+func (m *metricsStore) IncErrorCounter() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.errCounter += 1
+}
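+// ScheduleRebuild hands a task to the worker; since the tasks channel is unbuffered, it returns
+// ErrRebuildInProgress if a rebuild is already running.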
+
+func (m *metricsStore) ClearErrorCounter() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.errCounter = 0
+}
+
+func (m *metricsStore) DeleteShardMetrics() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.errCounter = 0
+}
+
+func (m *metricsStore) SetContainerObjectsCount(cnrID string, objectType string, value uint64) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.cnrCount[cnrID+objectType] = value
+}
+
+func (m *metricsStore) IncContainerObjectsCount(cnrID string, objectType string) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.cnrCount[cnrID+objectType]++
+}
+
+func (m *metricsStore) SubContainerObjectsCount(cnrID string, objectType string, value uint64) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ existed := m.cnrCount[cnrID+objectType]
+ if existed < value {
+ panic("existed value smaller than value to sustract")
+ }
+ if existed == value {
+ delete(m.cnrCount, cnrID+objectType)
+ } else {
+ m.cnrCount[cnrID+objectType] -= value
+ }
+}
+
+func (m *metricsStore) getContainerCount(cnrID, objectType string) (uint64, bool) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ v, ok := m.cnrCount[cnrID+objectType]
+ return v, ok
+}
+
+func (m *metricsStore) IncRefillObjectsCount(_ string, size int, success bool) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ m.refillCount++
+ m.refillSize += int64(size)
+}
+
+func (m *metricsStore) SetRefillPercent(_ string, percent uint32) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ m.refillPercent = percent
+}
+
+func (m *metricsStore) SetRefillStatus(_ string, status string) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ m.refillStatus = status
+}
+
+func (m *metricsStore) SetEvacuationInProgress(bool) {}
func TestCounters(t *testing.T) {
+ t.Parallel()
+
dir := t.TempDir()
sh, mm := shardWithMetrics(t, dir)
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
- sh.SetMode(mode.ReadOnly)
- require.True(t, mm.readOnly)
- sh.SetMode(mode.ReadWrite)
- require.False(t, mm.readOnly)
+ sh.SetMode(context.Background(), mode.ReadOnly)
+ require.Equal(t, mode.ReadOnly, mm.mode)
+ sh.SetMode(context.Background(), mode.ReadWrite)
+ require.Equal(t, mode.ReadWrite, mm.mode)
const objNumber = 10
- oo := make([]*object.Object, objNumber)
- for i := 0; i < objNumber; i++ {
- oo[i] = generateObject(t)
+ oo := make([]*objectSDK.Object, objNumber)
+ for i := range objNumber {
+ oo[i] = testutil.GenerateObject()
}
t.Run("defaults", func(t *testing.T) {
- require.Zero(t, mm.objCounters[physical])
- require.Zero(t, mm.objCounters[logical])
- require.Empty(t, mm.cnrSize)
- require.Zero(t, mm.pldSize)
+ require.Zero(t, mm.getObjectCounter(physical))
+ require.Zero(t, mm.getObjectCounter(logical))
+ require.Empty(t, mm.containerSizes())
+ require.Zero(t, mm.payloadSize())
+
+ for _, obj := range oo {
+ contID, _ := obj.ContainerID()
+ v, ok := mm.getContainerCount(contID.EncodeToString(), physical)
+ require.Zero(t, v)
+ require.False(t, ok)
+ v, ok = mm.getContainerCount(contID.EncodeToString(), logical)
+ require.Zero(t, v)
+ require.False(t, ok)
+ v, ok = mm.getContainerCount(contID.EncodeToString(), user)
+ require.Zero(t, v)
+ require.False(t, ok)
+ }
})
var totalPayload int64
- expectedSizes := make(map[string]int64)
+ expectedLogicalSizes := make(map[string]int64)
+ expected := make(map[cid.ID]meta.ObjectCounters)
for i := range oo {
cnr, _ := oo[i].ContainerID()
oSize := int64(oo[i].PayloadSize())
- expectedSizes[cnr.EncodeToString()] += oSize
+ expectedLogicalSizes[cnr.EncodeToString()] += oSize
totalPayload += oSize
+ expected[cnr] = meta.ObjectCounters{
+ Logic: 1,
+ Phy: 1,
+ User: 1,
+ }
}
- t.Run("put", func(t *testing.T) {
- var prm shard.PutPrm
+ var prm PutPrm
- for i := 0; i < objNumber; i++ {
- prm.SetObject(oo[i])
+ for i := range objNumber {
+ prm.SetObject(oo[i])
- _, err := sh.Put(prm)
- require.NoError(t, err)
- }
+ _, err := sh.Put(context.Background(), prm)
+ require.NoError(t, err)
+ }
- require.Equal(t, uint64(objNumber), mm.objCounters[physical])
- require.Equal(t, uint64(objNumber), mm.objCounters[logical])
- require.Equal(t, expectedSizes, mm.cnrSize)
- require.Equal(t, totalPayload, mm.pldSize)
- })
+ require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
+ require.Equal(t, uint64(objNumber), mm.getObjectCounter(logical))
+ require.Equal(t, uint64(objNumber), mm.getObjectCounter(user))
+ require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, totalPayload, mm.payloadSize())
+
+ cc, err := sh.metaBase.ContainerCounters(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
t.Run("inhume_GC", func(t *testing.T) {
- var prm shard.InhumePrm
+ var prm InhumePrm
inhumedNumber := objNumber / 4
- for i := 0; i < inhumedNumber; i++ {
+ for i := range inhumedNumber {
prm.MarkAsGarbage(objectcore.AddressOf(oo[i]))
- _, err := sh.Inhume(prm)
+ _, err := sh.Inhume(context.Background(), prm)
require.NoError(t, err)
+
+ cid, ok := oo[i].ContainerID()
+ require.True(t, ok)
+ expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize())
+
+ if v, ok := expected[cid]; ok {
+ v.Logic--
+ v.User--
+ if v.IsZero() {
+ delete(expected, cid)
+ } else {
+ expected[cid] = v
+ }
+ }
}
- require.Equal(t, uint64(objNumber), mm.objCounters[physical])
- require.Equal(t, uint64(objNumber-inhumedNumber), mm.objCounters[logical])
- require.Equal(t, expectedSizes, mm.cnrSize)
- require.Equal(t, totalPayload, mm.pldSize)
+ require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
+ require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(logical))
+ require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(user))
+ require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, totalPayload, mm.payloadSize())
+
+ cc, err := sh.metaBase.ContainerCounters(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
oo = oo[inhumedNumber:]
})
t.Run("inhume_TS", func(t *testing.T) {
- var prm shard.InhumePrm
- ts := objectcore.AddressOf(generateObject(t))
+ var prm InhumePrm
- phy := mm.objCounters[physical]
- logic := mm.objCounters[logical]
+ phy := mm.getObjectCounter(physical)
+ logic := mm.getObjectCounter(logical)
+ custom := mm.getObjectCounter(user)
inhumedNumber := int(phy / 4)
- prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
+ for _, o := range addrFromObjs(oo[:inhumedNumber]) {
+ ts := oidtest.Address()
+ ts.SetContainer(o.Container())
+ prm.SetTarget(ts, o)
+ _, err := sh.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
- _, err := sh.Inhume(prm)
+ for i := range inhumedNumber {
+ cid, ok := oo[i].ContainerID()
+ require.True(t, ok)
+ expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize())
+
+ if v, ok := expected[cid]; ok {
+ v.Logic--
+ v.User--
+ if v.IsZero() {
+ delete(expected, cid)
+ } else {
+ expected[cid] = v
+ }
+ }
+ }
+
+ require.Equal(t, phy, mm.getObjectCounter(physical))
+ require.Equal(t, logic-uint64(inhumedNumber), mm.getObjectCounter(logical))
+ require.Equal(t, custom-uint64(inhumedNumber), mm.getObjectCounter(user))
+ require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, totalPayload, mm.payloadSize())
+
+ cc, err = sh.metaBase.ContainerCounters(context.Background())
require.NoError(t, err)
-
- require.Equal(t, phy, mm.objCounters[physical])
- require.Equal(t, logic-uint64(inhumedNumber), mm.objCounters[logical])
- require.Equal(t, expectedSizes, mm.cnrSize)
- require.Equal(t, totalPayload, mm.pldSize)
+ require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
oo = oo[inhumedNumber:]
})
t.Run("Delete", func(t *testing.T) {
- var prm shard.DeletePrm
+ var prm DeletePrm
- phy := mm.objCounters[physical]
- logic := mm.objCounters[logical]
+ phy := mm.getObjectCounter(physical)
+ logic := mm.getObjectCounter(logical)
+ custom := mm.getObjectCounter(user)
deletedNumber := int(phy / 4)
prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...)
- _, err := sh.Delete(prm)
+ _, err := sh.Delete(context.Background(), prm)
require.NoError(t, err)
- require.Equal(t, phy-uint64(deletedNumber), mm.objCounters[physical])
- require.Equal(t, logic-uint64(deletedNumber), mm.objCounters[logical])
+ require.Equal(t, phy-uint64(deletedNumber), mm.getObjectCounter(physical))
+ require.Equal(t, logic-uint64(deletedNumber), mm.getObjectCounter(logical))
+ require.Equal(t, custom-uint64(deletedNumber), mm.getObjectCounter(user))
var totalRemovedpayload uint64
for i := range oo[:deletedNumber] {
removedPayload := oo[i].PayloadSize()
totalRemovedpayload += removedPayload
cnr, _ := oo[i].ContainerID()
- expectedSizes[cnr.EncodeToString()] -= int64(removedPayload)
+ expectedLogicalSizes[cnr.EncodeToString()] -= int64(removedPayload)
+
+ if v, ok := expected[cnr]; ok {
+ v.Logic--
+ v.Phy--
+ v.User--
+ expected[cnr] = v
+ }
}
- require.Equal(t, expectedSizes, mm.cnrSize)
- require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.pldSize)
+ require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.payloadSize())
+
+ cc, err = sh.metaBase.ContainerCounters(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
})
}
-func shardWithMetrics(t *testing.T, path string) (*shard.Shard, *metricsStore) {
+func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
blobOpts := []blobstor.Option{
blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -197,36 +404,28 @@ func shardWithMetrics(t *testing.T, path string) (*shard.Shard, *metricsStore) {
}),
}
- mm := &metricsStore{
- objCounters: map[string]uint64{
- "phy": 0,
- "logic": 0,
- },
- cnrSize: make(map[string]int64),
- }
+ mm := newMetricStore()
- sh := shard.New(
- shard.WithBlobStorOptions(blobOpts...),
- shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(path, "pilorama"))),
- shard.WithMetaBaseOptions(
+ sh := New(
+ WithID(NewIDFromBytes([]byte{})),
+ WithBlobStorOptions(blobOpts...),
+ WithPiloramaOptions(pilorama.WithPath(filepath.Join(path, "pilorama"))),
+ WithMetaBaseOptions(
meta.WithPath(filepath.Join(path, "meta")),
meta.WithEpochState(epochState{})),
- shard.WithMetricsWriter(mm),
+ WithMetricsWriter(mm),
+ WithGCRemoverSleepInterval(time.Hour),
)
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
-
- t.Cleanup(func() {
- sh.Close()
- })
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
return sh, mm
}
-func addrFromObjs(oo []*object.Object) []oid.Address {
+func addrFromObjs(oo []*objectSDK.Object) []oid.Address {
aa := make([]oid.Address, len(oo))
- for i := 0; i < len(oo); i++ {
+ for i := range len(oo) {
aa[i] = objectcore.AddressOf(oo[i])
}
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 17ed3f3c89..901528976d 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,6 +1,9 @@
package shard
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"go.uber.org/zap"
@@ -17,19 +20,21 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode")
//
// Returns any error encountered that did not allow
// setting shard mode.
-func (s *Shard) SetMode(m mode.Mode) error {
- s.m.Lock()
- defer s.m.Unlock()
+func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error {
+ unlock := s.lockExclusive()
+ defer unlock()
- return s.setMode(m)
+ return s.setMode(ctx, m)
}
-func (s *Shard) setMode(m mode.Mode) error {
- s.log.Info("setting shard mode",
+func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
+ s.log.Info(ctx, logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
- components := []interface{ SetMode(mode.Mode) error }{
+ components := []interface {
+ SetMode(context.Context, mode.Mode) error
+ }{
s.metaBase, s.blobStor,
}
@@ -55,18 +60,18 @@ func (s *Shard) setMode(m mode.Mode) error {
}
}
- for i := range components {
- if err := components[i].SetMode(m); err != nil {
- return err
+ if !m.Disabled() {
+ for i := range components {
+ if err := components[i].SetMode(ctx, m); err != nil {
+ return err
+ }
}
}
s.info.Mode = m
- if s.metricsWriter != nil {
- s.metricsWriter.SetReadonly(s.info.Mode != mode.ReadWrite)
- }
+ s.metricsWriter.SetMode(s.info.Mode)
- s.log.Info("shard mode set successfully",
+ s.log.Info(ctx, logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}
diff --git a/pkg/local_object_storage/shard/mode/mode.go b/pkg/local_object_storage/shard/mode/mode.go
index 65b2b5c895..dc4d52b0e5 100644
--- a/pkg/local_object_storage/shard/mode/mode.go
+++ b/pkg/local_object_storage/shard/mode/mode.go
@@ -8,27 +8,41 @@ type Mode uint32
const (
// ReadWrite is a Mode value for shard that is available
// for read and write operations. Default shard mode.
- ReadWrite Mode = 0
+ ReadWrite Mode = 0b000
- // DegradedReadOnly is a Mode value for shard that is set automatically
- // after a certain number of errors is encountered. It is the same as
- // `mode.Degraded` but also is read-only.
- DegradedReadOnly = Degraded | ReadOnly
+ // ReadOnly is a Mode value for shard that does not
+ // accept write operation but is readable.
+ ReadOnly Mode = 0b001
+
+ // Degraded is a Mode value for shard when the metabase is unavailable.
+ // It is hard to perform some modifying operations in this mode, thus it can only be set by an administrator.
+ Degraded Mode = 0b010
// Disabled mode is a mode where a shard is disabled.
// An existing shard can't have this mode, but it can be used in
// the configuration or control service commands.
- Disabled = math.MaxUint32
+ Disabled Mode = math.MaxUint32
+
+ // DegradedReadOnly is a Mode value for shard that is set automatically
+ // after a certain number of errors is encountered. It is the same as
+ // `mode.Degraded` but also is read-only.
+ DegradedReadOnly Mode = Degraded | ReadOnly
)
-const (
- // ReadOnly is a Mode value for shard that does not
- // accept write operation but is readable.
- ReadOnly Mode = 1 << iota
+// ComponentMode represents the basic operation modes of shard components: READ_ONLY, READ_WRITE, and DISABLED.
+type ComponentMode uint32
- // Degraded is a Mode value for shard when the metabase is unavailable.
- // It is hard to perform some modifying operations in this mode, thus it can only be set by an administrator.
- Degraded
+const (
+ // ComponentReadWrite is a Mode value for component that is available
+ // for read and write operations. Default component mode.
+ ComponentReadWrite ComponentMode = 0
+
+ // ComponentReadOnly is a Mode value for component that does not
+ // accept write operation but is readable.
+ ComponentReadOnly ComponentMode = 0b001
+
+ // ComponentDisabled mode is a mode where a component is disabled.
+ ComponentDisabled ComponentMode = math.MaxUint32
)
func (m Mode) String() string {
@@ -48,6 +62,19 @@ func (m Mode) String() string {
}
}
+func (m ComponentMode) String() string {
+ switch m {
+ default:
+ return "UNDEFINED"
+ case ComponentReadWrite:
+ return "READ_WRITE"
+ case ComponentReadOnly:
+ return "READ_ONLY"
+ case ComponentDisabled:
+ return "CLOSED"
+ }
+}
+
// NoMetabase returns true iff m is operating without the metabase.
func (m Mode) NoMetabase() bool {
return m&Degraded != 0
@@ -57,3 +84,40 @@ func (m Mode) NoMetabase() bool {
func (m Mode) ReadOnly() bool {
return m&ReadOnly != 0
}
+
+// ReadOnly returns true iff m prohibits modifying operations with the component.
+func (m ComponentMode) ReadOnly() bool {
+ return m&ComponentReadOnly != 0
+}
+
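+// Disabled returns true iff m is the Disabled mode.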
+func (m Mode) Disabled() bool {
+ return m == Disabled
+}
+
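+// Disabled returns true iff m is the ComponentDisabled mode.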
+func (m ComponentMode) Disabled() bool {
+ return m == ComponentDisabled
+}
+
+// ConvertToComponentModeDegraded converts a shard Mode to the corresponding ComponentMode.
+// Disables the component if the shard is in degraded mode. Used in Metabase, Writecache, Pilorama.
+func ConvertToComponentModeDegraded(m Mode) ComponentMode {
+ if m.NoMetabase() || m.Disabled() {
+ return ComponentDisabled
+ }
+ if m.ReadOnly() {
+ return ComponentReadOnly
+ }
+ return ComponentReadWrite
+}
+
+// ConvertToComponentMode converts a shard Mode to the corresponding ComponentMode.
+// Ignores the shard's degraded mode. Used in Blobstore.
+func ConvertToComponentMode(m Mode) ComponentMode {
+ if m.Disabled() {
+ return ComponentDisabled
+ }
+ if m.ReadOnly() {
+ return ComponentReadOnly
+ }
+ return ComponentReadWrite
+}
diff --git a/pkg/local_object_storage/shard/move.go b/pkg/local_object_storage/shard/move.go
deleted file mode 100644
index c6bf8409e0..0000000000
--- a/pkg/local_object_storage/shard/move.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package shard
-
-import (
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-// ToMoveItPrm encapsulates parameters for ToMoveIt operation.
-type ToMoveItPrm struct {
- addr oid.Address
-}
-
-// ToMoveItRes encapsulates results of ToMoveIt operation.
-type ToMoveItRes struct{}
-
-// SetAddress sets object address that should be marked to move into another
-// shard.
-func (p *ToMoveItPrm) SetAddress(addr oid.Address) {
- p.addr = addr
-}
-
-// ToMoveIt calls metabase.ToMoveIt method to mark object as relocatable to
-// another shard.
-func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- m := s.info.Mode
- if m.ReadOnly() {
- return ToMoveItRes{}, ErrReadOnlyMode
- } else if m.NoMetabase() {
- return ToMoveItRes{}, ErrDegradedMode
- }
-
- var toMovePrm meta.ToMoveItPrm
- toMovePrm.SetAddress(prm.addr)
-
- _, err := s.metaBase.ToMoveIt(toMovePrm)
- if err != nil {
- s.log.Debug("could not mark object for shard relocation in metabase",
- zap.String("error", err.Error()),
- )
- }
-
- return ToMoveItRes{}, nil
-}
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index 48dbe1be29..f8cb00a31e 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -1,35 +1,52 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *object.Object
+ obj *objectSDK.Object
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
type PutRes struct{}
// SetObject is a Put option to set object to save.
-func (p *PutPrm) SetObject(obj *object.Object) {
+func (p *PutPrm) SetObject(obj *objectSDK.Object) {
p.obj = obj
}
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
// Put saves the object in shard.
//
// Returns any error encountered that
// did not allow to completely save the object.
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
-func (s *Shard) Put(prm PutPrm) (PutRes, error) {
+func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Put",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -50,21 +67,27 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
var res common.PutRes
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return PutRes{}, err
+ }
+ defer release()
+
// exist check are not performed there, these checks should be executed
// ahead of `Put` by storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
if tryCache {
- res, err = s.writeCache.Put(putPrm)
+ res, err = s.writeCache.Put(ctx, putPrm)
}
if err != nil || !tryCache {
if err != nil {
- s.log.Debug("can't put object to the write-cache, trying blobstor",
- zap.String("err", err.Error()))
+ s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+ zap.Error(err))
}
- res, err = s.blobStor.Put(putPrm)
+ res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
- return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
+ return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
}
}
@@ -72,15 +95,19 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
- if _, err := s.metaBase.Put(pPrm); err != nil {
+ pPrm.SetIndexAttributes(prm.indexAttributes)
+ res, err := s.metaBase.Put(ctx, pPrm)
+ if err != nil {
// may we need to handle this case in a special way
// since the object has been successfully written to BlobStor
- return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
+ return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
}
- s.incObjectCounter()
- s.addToPayloadSize(int64(prm.obj.PayloadSize()))
- s.addToContainerSize(putPrm.Address.Container().EncodeToString(), int64(prm.obj.PayloadSize()))
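+ // update the counters only if the metabase reports that the object was actually inserted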
+ if res.Inserted {
+ s.incObjectCounter(putPrm.Address.Container(), meta.IsUserObject(prm.obj))
+ s.addToPayloadSize(int64(prm.obj.PayloadSize()))
+ s.addToContainerSize(putPrm.Address.Container().EncodeToString(), int64(prm.obj.PayloadSize()))
+ }
}
return PutRes{}, nil
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index c9106c235a..4436891048 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -1,13 +1,19 @@
package shard
import (
+ "context"
+ "strconv"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// RngPrm groups the parameters of GetRange operation.
@@ -23,7 +29,7 @@ type RngPrm struct {
// RngRes groups the resulting values of GetRange operation.
type RngRes struct {
- obj *object.Object
+ obj *objectSDK.Object
hasMeta bool
}
@@ -48,7 +54,7 @@ func (p *RngPrm) SetIgnoreMeta(ignore bool) {
// Object returns the requested object part.
//
// Instance payload contains the requested range of the original object.
-func (r RngRes) Object() *object.Object {
+func (r RngRes) Object() *objectSDK.Object {
return r.obj
}
@@ -65,31 +71,50 @@ func (r RngRes) HasMeta() bool {
// Returns ErrRangeOutOfBounds if the requested object range is out of bounds.
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard.
-// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) GetRange(prm RngPrm) (RngRes, error) {
+// Returns the objectSDK.ErrObjectIsExpired if the object is present but already expired.
+// Returns ErrShardDisabled if the shard is disabled.
+func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetRange",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("skip_meta", prm.skipMeta),
+ attribute.String("offset", strconv.FormatUint(prm.off, 10)),
+ attribute.String("length", strconv.FormatUint(prm.ln, 10)),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
- cb := func(stor *blobstor.BlobStor, id []byte) (*object.Object, error) {
+ if s.info.EvacuationInProgress {
+ return RngRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
+ if s.info.Mode.Disabled() {
+ return RngRes{}, ErrShardDisabled
+ }
+
+ cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) {
var getRngPrm common.GetRangePrm
getRngPrm.Address = prm.addr
getRngPrm.Range.SetOffset(prm.off)
getRngPrm.Range.SetLength(prm.ln)
getRngPrm.StorageID = id
- res, err := stor.GetRange(getRngPrm)
+ res, err := stor.GetRange(ctx, getRngPrm)
if err != nil {
return nil, err
}
- obj := object.New()
+ obj := objectSDK.New()
obj.SetPayload(res.Data)
return obj, nil
}
- wc := func(c writecache.Cache) (*object.Object, error) {
- res, err := c.Get(prm.addr)
+ wc := func(c writecache.Cache) (*objectSDK.Object, error) {
+ res, err := c.Get(ctx, prm.addr)
if err != nil {
return nil, err
}
@@ -98,16 +123,22 @@ func (s *Shard) GetRange(prm RngPrm) (RngRes, error) {
from := prm.off
to := from + prm.ln
if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
- return nil, logicerr.Wrap(apistatus.ObjectOutOfRange{})
+ return nil, logicerr.Wrap(new(apistatus.ObjectOutOfRange))
}
- obj := object.New()
+ obj := objectSDK.New()
obj.SetPayload(payload[from:to])
return obj, nil
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return RngRes{}, err
+ }
+ defer release()
+
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
- obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
+ obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return RngRes{
obj: obj,
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index 328a217ddc..06fe9f5110 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -1,6 +1,8 @@
-package shard_test
+package shard
import (
+ "bytes"
+ "context"
"math"
"path/filepath"
"testing"
@@ -9,22 +11,23 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
)
func TestShard_GetRange(t *testing.T) {
+ t.Parallel()
t.Run("without write cache", func(t *testing.T) {
+ t.Parallel()
testShardGetRange(t, false)
})
t.Run("with write cache", func(t *testing.T) {
+ t.Parallel()
testShardGetRange(t, true)
})
}
@@ -65,48 +68,58 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
testCase{true, "object in write-cache, out of range, big offset", 100, newRange(101, math.MaxUint64-10)})
}
- sh := newCustomShard(t, t.TempDir(), hasWriteCache,
- []writecache.Option{writecache.WithMaxObjectSize(writeCacheMaxSize)},
- []blobstor.Option{blobstor.WithStorages([]blobstor.SubStorage{
- {
- Storage: blobovniczatree.NewBlobovniczaTree(
- blobovniczatree.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
- blobovniczatree.WithBlobovniczaShallowDepth(1),
- blobovniczatree.WithBlobovniczaShallowWidth(1)),
- Policy: func(_ *objectSDK.Object, data []byte) bool {
- return len(data) <= smallObjectSize
+ wcOpts := []writecache.Option{
+ writecache.WithMaxObjectSize(writeCacheMaxSize),
+ }
+
+ sh := newCustomShard(t, hasWriteCache, shardOptions{
+ wcOpts: wcOpts,
+ bsOpts: []blobstor.Option{
+ blobstor.WithStorages([]blobstor.SubStorage{
+ {
+ Storage: blobovniczatree.NewBlobovniczaTree(
+ context.Background(),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
+ blobovniczatree.WithBlobovniczaShallowDepth(1),
+ blobovniczatree.WithBlobovniczaShallowWidth(1)),
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
+ return len(data) <= smallObjectSize
+ },
},
- },
- {
- Storage: fstree.New(
- fstree.WithPath(filepath.Join(t.TempDir(), "blob"))),
- },
- })})
- defer releaseShard(sh, t)
+ {
+ Storage: fstree.New(
+ fstree.WithPath(filepath.Join(t.TempDir(), "blob"))),
+ },
+ }),
+ },
+ })
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- obj := generateObject(t)
- addAttribute(obj, "foo", "bar")
- addPayload(obj, tc.payloadSize)
+ obj := testutil.GenerateObject()
+ testutil.AddAttribute(obj, "foo", "bar")
+ testutil.AddPayload(obj, tc.payloadSize)
addr := object.AddressOf(obj)
- payload := slice.Copy(obj.Payload())
+ payload := bytes.Clone(obj.Payload())
- var putPrm shard.PutPrm
+ var putPrm PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- var rngPrm shard.RngPrm
+ var rngPrm RngPrm
rngPrm.SetAddress(addr)
rngPrm.SetRange(tc.rng.GetOffset(), tc.rng.GetLength())
- res, err := sh.GetRange(rngPrm)
+ res, err := sh.GetRange(context.Background(), rngPrm)
if tc.hasErr {
- require.ErrorAs(t, err, &apistatus.ObjectOutOfRange{})
+ var target *apistatus.ObjectOutOfRange
+ require.ErrorAs(t, err, &target)
} else {
require.Equal(t,
payload[tc.rng.GetOffset():tc.rng.GetOffset()+tc.rng.GetLength()],
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
new file mode 100644
index 0000000000..20f1f2b6f8
--- /dev/null
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -0,0 +1,193 @@
+package shard
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
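+// ErrRebuildInProgress is returned when a rebuild is scheduled while another one is still running.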
+var ErrRebuildInProgress = errors.New("shard rebuild in progress")
+
+type rebuildTask struct {
+ concurrencyLimiter common.RebuildLimiter
+ fillPercent int
+}
+
+type rebuilder struct {
+ mtx *sync.Mutex
+ wg *sync.WaitGroup
+ cancel func()
+ done chan struct{}
+ tasks chan rebuildTask
+}
+
+func newRebuilder() *rebuilder {
+ return &rebuilder{
+ mtx: &sync.Mutex{},
+ wg: &sync.WaitGroup{},
+ tasks: make(chan rebuildTask),
+ }
+}
+
+func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.done != nil {
+ return // already started
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ r.cancel = cancel
+ r.done = make(chan struct{})
+ r.wg.Add(1)
+ go func() {
+ defer r.wg.Done()
+ for {
+ select {
+ case <-r.done:
+ return
+ case t, ok := <-r.tasks:
+ if !ok {
+ continue
+ }
+ runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter)
+ }
+ }
+ }()
+}
+
+func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
+ fillPercent int, concLimiter common.RebuildLimiter,
+) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ log.Info(ctx, logs.BlobstoreRebuildStarted)
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil {
+ log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
+ } else {
+ log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
+ }
+}
+
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int,
+) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case r.tasks <- rebuildTask{
+ concurrencyLimiter: limiter,
+ fillPercent: fillPercent,
+ }:
+ return nil
+ default:
+ return ErrRebuildInProgress
+ }
+}
+
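+// Stop cancels the rebuild in progress, if any, and waits for the worker goroutine to exit.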
+func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.done != nil {
+ close(r.done)
+ }
+ if r.cancel != nil {
+ r.cancel()
+ }
+ r.wg.Wait()
+ r.cancel = nil
+ r.done = nil
+ log.Info(ctx, logs.BlobstoreRebuildStopped)
+}
+
+var errMBIsNotAvailable = errors.New("metabase is not available")
+
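+// mbStorageIDUpdate propagates new storage IDs to the metabase during rebuild.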
+type mbStorageIDUpdate struct {
+ mb *meta.DB
+}
+
+func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if u.mb == nil {
+ return errMBIsNotAvailable
+ }
+
+ var prm meta.UpdateStorageIDPrm
+ prm.SetAddress(addr)
+ prm.SetStorageID(storageID)
+ _, err := u.mb.UpdateStorageID(ctx, prm)
+ return err
+}
+
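+// RebuildPrm groups the parameters of Shard.ScheduleRebuild operation.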
+type RebuildPrm struct {
+ ConcurrencyLimiter common.ConcurrencyLimiter
+ TargetFillPercent uint32
+}
+
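+// ScheduleRebuild schedules a blobstor rebuild if the shard mode permits modifying operations.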
+func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ScheduleRebuild",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Int64("target_fill_percent", int64(p.TargetFillPercent)),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ limiter := &rebuildLimiter{
+ concurrencyLimiter: p.ConcurrencyLimiter,
+ rateLimiter: s.opsLimiter,
+ }
+ return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent))
+}
+
+var _ common.RebuildLimiter = (*rebuildLimiter)(nil)
+
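+// rebuildLimiter couples a concurrency limiter with the shard's QoS rate limiter.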
+type rebuildLimiter struct {
+ concurrencyLimiter common.ConcurrencyLimiter
+ rateLimiter qos.Limiter
+}
+
+func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
+ return r.concurrencyLimiter.AcquireWorkSlot(ctx)
+}
+
+func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) {
+ release, err := r.rateLimiter.ReadRequest(ctx)
+ return common.ReleaseFunc(release), err
+}
+
+func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) {
+ release, err := r.rateLimiter.WriteRequest(ctx)
+ return common.ReleaseFunc(release), err
+}
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
new file mode 100644
index 0000000000..d90343265c
--- /dev/null
+++ b/pkg/local_object_storage/shard/refill_test.go
@@ -0,0 +1,76 @@
+package shard
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkRefillMetabase(b *testing.B) {
+ b.Run("100 objects", func(b *testing.B) {
+ benchRefillMetabase(b, 100)
+ })
+
+ b.Run("1000 objects", func(b *testing.B) {
+ benchRefillMetabase(b, 1000)
+ })
+
+ b.Run("2000 objects", func(b *testing.B) {
+ benchRefillMetabase(b, 2000)
+ })
+
+ b.Run("5000 objects", func(b *testing.B) {
+ benchRefillMetabase(b, 5000)
+ })
+}
+
+func benchRefillMetabase(b *testing.B, objectsCount int) {
+ sh := newCustomShard(b, false, shardOptions{
+ additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)},
+ })
+
+ defer func() { require.NoError(b, sh.Close(context.Background())) }()
+
+ var putPrm PutPrm
+
+ for range objectsCount / 2 {
+ obj := testutil.GenerateObject()
+ testutil.AddAttribute(obj, "foo", "bar")
+ testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj
+
+ putPrm.SetObject(obj)
+
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(b, err)
+ }
+
+ for range objectsCount / 2 {
+ obj := testutil.GenerateObject()
+ testutil.AddAttribute(obj, "foo", "bar")
+ obj.SetID(oidtest.ID())
+ testutil.AddPayload(obj, 1<<20) // fstree obj
+
+ putPrm.SetObject(obj)
+
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(b, err)
+ }
+
+ require.NoError(b, sh.Close(context.Background()))
+ require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path))
+
+ require.NoError(b, sh.Open(context.Background()))
+ sh.cfg.refillMetabase = true
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ require.NoError(b, sh.Init(context.Background()))
+
+ require.NoError(b, sh.Close(context.Background()))
+}
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index fbe4d61218..e563f390b7 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -1,6 +1,7 @@
package shard
import (
+ "context"
"os"
"path/filepath"
"testing"
@@ -10,7 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -19,14 +20,14 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
)
func TestShardReload(t *testing.T) {
+ t.Parallel()
+
p := t.Name()
defer os.RemoveAll(p)
-
- l := &logger.Logger{Logger: zaptest.NewLogger(t)}
+ l := test.NewLogger(t)
blobOpts := []blobstor.Option{
blobstor.WithLogger(l),
blobstor.WithStorages([]blobstor.SubStorage{
@@ -40,18 +41,26 @@ func TestShardReload(t *testing.T) {
metaOpts := []meta.Option{
meta.WithPath(filepath.Join(p, "meta")),
- meta.WithEpochState(epochState{})}
+ meta.WithEpochState(epochState{}),
+ }
opts := []Option{
+ WithID(NewIDFromBytes([]byte{})),
WithLogger(l),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(metaOpts...),
WithPiloramaOptions(
- pilorama.WithPath(filepath.Join(p, "pilorama")))}
+ pilorama.WithPath(filepath.Join(p, "pilorama"))),
+ WithMetricsWriter(newMetricStore()),
+ }
sh := New(opts...)
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
+
+ defer func() {
+ require.NoError(t, sh.Close(context.Background()))
+ }()
objects := make([]objAddr, 5)
for i := range objects {
@@ -63,9 +72,9 @@ func TestShardReload(t *testing.T) {
checkHasObjects := func(t *testing.T, exists bool) {
for i := range objects {
var prm ExistsPrm
- prm.SetAddress(objects[i].addr)
+ prm.Address = objects[i].addr
- res, err := sh.Exists(prm)
+ res, err := sh.Exists(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, exists, res.Exists(), "object #%d is missing", i)
}
@@ -74,7 +83,7 @@ func TestShardReload(t *testing.T) {
checkHasObjects(t, true)
t.Run("same config, no-op", func(t *testing.T) {
- require.NoError(t, sh.Reload(opts...))
+ require.NoError(t, sh.Reload(context.Background(), opts...))
checkHasObjects(t, true)
})
@@ -85,7 +94,7 @@ func TestShardReload(t *testing.T) {
}
newOpts := newShardOpts(filepath.Join(p, "meta1"), false)
- require.NoError(t, sh.Reload(newOpts...))
+ require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, false) // new path, but no resync
@@ -96,7 +105,7 @@ func TestShardReload(t *testing.T) {
})
newOpts = newShardOpts(filepath.Join(p, "meta2"), true)
- require.NoError(t, sh.Reload(newOpts...))
+ require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, true) // all objects are restored, including the new one
@@ -105,7 +114,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, os.WriteFile(badPath, []byte{1}, 0))
newOpts = newShardOpts(badPath, true)
- require.Error(t, sh.Reload(newOpts...))
+ require.Error(t, sh.Reload(context.Background(), newOpts...))
// Cleanup is done, no panic.
obj := newObject()
@@ -116,7 +125,7 @@ func TestShardReload(t *testing.T) {
// Successive reload produces no undesired effects.
require.NoError(t, os.RemoveAll(badPath))
- require.NoError(t, sh.Reload(newOpts...))
+ require.NoError(t, sh.Reload(context.Background(), newOpts...))
obj = newObject()
require.NoError(t, putObject(sh, obj))
@@ -131,7 +140,7 @@ func putObject(sh *Shard, obj *objectSDK.Object) error {
var prm PutPrm
prm.SetObject(obj)
- _, err := sh.Put(prm)
+ _, err := sh.Put(context.Background(), prm)
return err
}
diff --git a/pkg/local_object_storage/shard/restore.go b/pkg/local_object_storage/shard/restore.go
deleted file mode 100644
index 73dc1d1784..0000000000
--- a/pkg/local_object_storage/shard/restore.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package shard
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "io"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-)
-
-// ErrInvalidMagic is returned when dump format is invalid.
-var ErrInvalidMagic = logicerr.New("invalid magic")
-
-// RestorePrm groups the parameters of Restore operation.
-type RestorePrm struct {
- path string
- stream io.Reader
- ignoreErrors bool
-}
-
-// WithPath is a Restore option to set the destination path.
-func (p *RestorePrm) WithPath(path string) {
- p.path = path
-}
-
-// WithStream is a Restore option to set the stream to read objects from.
-// It takes priority over `WithPath` option.
-func (p *RestorePrm) WithStream(r io.Reader) {
- p.stream = r
-}
-
-// WithIgnoreErrors is a Restore option which allows to ignore errors encountered during restore.
-// Corrupted objects will not be processed.
-func (p *RestorePrm) WithIgnoreErrors(ignore bool) {
- p.ignoreErrors = ignore
-}
-
-// RestoreRes groups the result fields of Restore operation.
-type RestoreRes struct {
- count int
- failed int
-}
-
-// Count return amount of object written.
-func (r RestoreRes) Count() int {
- return r.count
-}
-
-// FailCount return amount of object skipped.
-func (r RestoreRes) FailCount() int {
- return r.failed
-}
-
-// Restore restores objects from the dump prepared by Dump.
-//
-// Returns any error encountered.
-func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) {
- // Disallow changing mode during restore.
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return RestoreRes{}, ErrReadOnlyMode
- }
-
- r := prm.stream
- if r == nil {
- f, err := os.OpenFile(prm.path, os.O_RDONLY, os.ModeExclusive)
- if err != nil {
- return RestoreRes{}, err
- }
- defer f.Close()
-
- r = f
- }
-
- var m [4]byte
- _, _ = io.ReadFull(r, m[:])
- if !bytes.Equal(m[:], dumpMagic) {
- return RestoreRes{}, ErrInvalidMagic
- }
-
- var putPrm PutPrm
-
- var count, failCount int
- var data []byte
- var size [4]byte
- for {
- // If there are less than 4 bytes left, `Read` returns nil error instead of
- // io.ErrUnexpectedEOF, thus `ReadFull` is used.
- _, err := io.ReadFull(r, size[:])
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
- return RestoreRes{}, err
- }
-
- sz := binary.LittleEndian.Uint32(size[:])
- if uint32(cap(data)) < sz {
- data = make([]byte, sz)
- } else {
- data = data[:sz]
- }
-
- _, err = r.Read(data)
- if err != nil {
- return RestoreRes{}, err
- }
-
- obj := object.New()
- err = obj.Unmarshal(data)
- if err != nil {
- if prm.ignoreErrors {
- failCount++
- continue
- }
- return RestoreRes{}, err
- }
-
- putPrm.SetObject(obj)
- _, err = s.Put(putPrm)
- if err != nil && !IsErrObjectExpired(err) && !IsErrRemoved(err) {
- return RestoreRes{}, err
- }
-
- count++
- }
-
- return RestoreRes{count: count, failed: failCount}, nil
-}
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index 4bb467d48c..fbc751e267 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -1,18 +1,23 @@
package shard
import (
+ "context"
"fmt"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters object.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ isIndexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -21,12 +26,13 @@ type SelectRes struct {
}
// SetContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) SetContainerID(cnr cid.ID) {
+func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) {
p.cnr = cnr
+ p.isIndexedContainer = isIndexedContainer
}
// SetFilters is a Select option to set the object filters.
-func (p *SelectPrm) SetFilters(fs object.SearchFilters) {
+func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
@@ -39,7 +45,14 @@ func (r SelectRes) AddressList() []oid.Address {
//
// Returns any error encountered that
// did not allow to completely select the objects.
-func (s *Shard) Select(prm SelectPrm) (SelectRes, error) {
+func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Select",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", prm.cnr.EncodeToString()),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -47,13 +60,20 @@ func (s *Shard) Select(prm SelectPrm) (SelectRes, error) {
return SelectRes{}, ErrDegradedMode
}
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return SelectRes{}, err
+ }
+ defer release()
+
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
+ selectPrm.SetUseAttributeIndex(prm.isIndexedContainer)
- mRes, err := s.metaBase.Select(selectPrm)
+ mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
- return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
+ return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
}
return SelectRes{
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index a0fd077c7b..f21541d9d3 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -3,8 +3,12 @@ package shard
import (
"context"
"sync"
+ "sync/atomic"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -12,6 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
@@ -31,6 +36,14 @@ type Shard struct {
metaBase *meta.DB
tsSource TombstoneSource
+
+ rb *rebuilder
+
+ opsLimiter *atomicOpsLimiter
+
+ gcCancel atomic.Value
+ setModeRequested atomic.Bool
+ writecacheSealCancel atomic.Pointer[writecacheSealCanceler]
}
// Option represents Shard's constructor option.
@@ -40,42 +53,19 @@ type Option func(*cfg)
type ExpiredTombstonesCallback func(context.Context, []meta.TombstonedObject)
// ExpiredObjectsCallback is a callback handling list of expired objects.
-type ExpiredObjectsCallback func(context.Context, []oid.Address)
+type ExpiredObjectsCallback func(context.Context, uint64, []oid.Address)
// DeletedLockCallback is a callback handling list of deleted LOCK objects.
type DeletedLockCallback func(context.Context, []oid.Address)
-// MetricsWriter is an interface that must store shard's metrics.
-type MetricsWriter interface {
- // SetObjectCounter must set object counter taking into account object type.
- SetObjectCounter(objectType string, v uint64)
- // AddToObjectCounter must update object counter taking into account object
- // type.
- // Negative parameter must decrease the counter.
- AddToObjectCounter(objectType string, delta int)
- // AddToContainerSize must add a value to the container size.
- // Value can be negative.
- AddToContainerSize(cnr string, value int64)
- // AddToPayloadSize must add a value to the payload size.
- // Value can be negative.
- AddToPayloadSize(value int64)
- // IncObjectCounter must increment shard's object counter taking into account
- // object type.
- IncObjectCounter(objectType string)
- // DecObjectCounter must decrement shard's object counter taking into account
- // object type.
- DecObjectCounter(objectType string)
- // SetShardID must set (update) the shard identifier that will be used in
- // metrics.
- SetShardID(id string)
- // SetReadonly must set shard readonly state.
- SetReadonly(readonly bool)
-}
+// EmptyContainersCallback is a callback handling a list of zero-size and zero-count containers.
+type EmptyContainersCallback func(context.Context, []cid.ID)
type cfg struct {
m sync.RWMutex
- refillMetabase bool
+ refillMetabase bool
+ refillMetabaseWorkersCount int
rmBatchSize int
@@ -101,19 +91,30 @@ type cfg struct {
deletedLockCallBack DeletedLockCallback
+ zeroSizeContainersCallback EmptyContainersCallback
+ zeroCountContainersCallback EmptyContainersCallback
+
tsSource TombstoneSource
metricsWriter MetricsWriter
- reportErrorFunc func(selfID string, message string, err error)
+ reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
+
+ containerInfo container.InfoProvider
+
+ configOpsLimiter qos.Limiter
}
func defaultCfg() *cfg {
return &cfg{
- rmBatchSize: 100,
- log: &logger.Logger{Logger: zap.L()},
- gcCfg: defaultGCCfg(),
- reportErrorFunc: func(string, string, error) {},
+ rmBatchSize: 100,
+ log: logger.NewLoggerWrapper(zap.L()),
+ gcCfg: defaultGCCfg(),
+ reportErrorFunc: func(context.Context, string, string, error) {},
+ zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
+ zeroCountContainersCallback: func(context.Context, []cid.ID) {},
+ metricsWriter: noopMetrics{},
+ configOpsLimiter: qos.NewNoopLimiter(),
}
}
@@ -129,14 +130,15 @@ func New(opts ...Option) *Shard {
mb := meta.New(c.metaOpts...)
s := &Shard{
- cfg: c,
- blobStor: bs,
- metaBase: mb,
- tsSource: c.tsSource,
+ cfg: c,
+ blobStor: bs,
+ metaBase: mb,
+ tsSource: c.tsSource,
+ opsLimiter: newAtomicOpsLimiter(c.configOpsLimiter),
}
- reportFunc := func(msg string, err error) {
- s.reportErrorFunc(s.ID().String(), msg, err)
+ reportFunc := func(ctx context.Context, msg string, err error) {
+ s.reportErrorFunc(ctx, s.ID().String(), msg, err)
}
s.blobStor.SetReportErrorFunc(reportFunc)
@@ -146,7 +148,9 @@ func New(opts ...Option) *Shard {
append(c.writeCacheOpts,
writecache.WithReportErrorFunc(reportFunc),
writecache.WithBlobstor(bs),
- writecache.WithMetabase(mb))...)
+ writecache.WithMetabase(mb),
+ writecache.WithQoSLimiter(s.opsLimiter))...)
+ s.writeCache.GetMetrics().SetPath(s.writeCache.DumpInfo().Path)
}
if s.piloramaOpts != nil {
@@ -154,6 +158,7 @@ func New(opts ...Option) *Shard {
}
s.fillInfo()
+ s.writecacheSealCancel.Store(notInitializedCancel)
return s
}
@@ -180,12 +185,19 @@ func WithMetaBaseOptions(opts ...meta.Option) Option {
}
// WithWriteCacheOptions returns option to set internal write cache options.
-func WithWriteCacheOptions(opts ...writecache.Option) Option {
+func WithWriteCacheOptions(opts []writecache.Option) Option {
return func(c *cfg) {
c.writeCacheOpts = opts
}
}
+// WithWriteCacheMetrics returns an option to set the metrics register used by the write cache.
+func WithWriteCacheMetrics(wcMetrics writecache.Metrics) Option {
+ return func(c *cfg) {
+ c.writeCacheOpts = append(c.writeCacheOpts, writecache.WithMetrics(wcMetrics))
+ }
+}
+
// WithPiloramaOptions returns option to set internal write cache options.
func WithPiloramaOptions(opts ...pilorama.Option) Option {
return func(c *cfg) {
@@ -197,7 +209,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option {
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
- c.gcCfg.log = l
+ c.gcCfg.log = l.WithTag(logger.TagGC)
}
}
@@ -209,12 +221,12 @@ func WithWriteCache(use bool) Option {
}
// hasWriteCache returns bool if write cache exists on shards.
-func (s Shard) hasWriteCache() bool {
- return s.cfg.useWriteCache
+func (s *Shard) hasWriteCache() bool {
+ return s.useWriteCache
}
-// needRefillMetabase returns true if metabase is needed to be refilled.
-func (s Shard) needRefillMetabase() bool {
+// NeedRefillMetabase returns true if the metabase needs to be refilled.
+func (s *Shard) NeedRefillMetabase() bool {
return s.cfg.refillMetabase
}
@@ -265,6 +277,13 @@ func WithRefillMetabase(v bool) Option {
}
}
+// WithRefillMetabaseWorkersCount returns option to set the number of workers used to refill the metabase during Shard initialization.
+func WithRefillMetabaseWorkersCount(v int) Option {
+ return func(c *cfg) {
+ c.refillMetabaseWorkersCount = v
+ }
+}
+
// WithMode returns option to set shard's mode. Mode must be one of the predefined:
// - mode.ReadWrite;
// - mode.ReadOnly.
@@ -297,24 +316,82 @@ func WithMetricsWriter(v MetricsWriter) Option {
}
}
+// WithGCMetrics returns option to specify storage of the GC metrics.
+func WithGCMetrics(v GCMectrics) Option {
+ return func(c *cfg) {
+ c.gcCfg.metrics = v
+ }
+}
+
// WithReportErrorFunc returns option to specify callback for handling storage-related errors
// in the background workers.
-func WithReportErrorFunc(f func(selfID string, message string, err error)) Option {
+func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
return func(c *cfg) {
c.reportErrorFunc = f
}
}
-func (s *Shard) fillInfo() {
- s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
- s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
- s.cfg.info.Mode = s.GetMode()
+// WithExpiredCollectorBatchSize returns option to set the batch size
+// of the expired object collection operation.
+func WithExpiredCollectorBatchSize(size int) Option {
+ return func(c *cfg) {
+ c.gcCfg.expiredCollectorBatchSize = size
+ }
+}
- if s.cfg.useWriteCache {
- s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
+// WithExpiredCollectorWorkerCount returns option to set the number of
+// concurrent workers for the expired object collection operation.
+func WithExpiredCollectorWorkerCount(count int) Option {
+ return func(c *cfg) {
+ c.gcCfg.expiredCollectorWorkerCount = count
+ }
+}
+
+// WithDisabledGC disables GC.
+// For testing purposes only.
+func WithDisabledGC() Option {
+ return func(c *cfg) {
+ c.gcCfg.testHookRemover = func(_ context.Context) gcRunResult { return gcRunResult{} }
+ }
+}
+
+// WithZeroSizeCallback returns option to set zero-size containers callback.
+func WithZeroSizeCallback(cb EmptyContainersCallback) Option {
+ return func(c *cfg) {
+ c.zeroSizeContainersCallback = cb
+ }
+}
+
+// WithZeroCountCallback returns option to set zero-count containers callback.
+func WithZeroCountCallback(cb EmptyContainersCallback) Option {
+ return func(c *cfg) {
+ c.zeroCountContainersCallback = cb
+ }
+}
+
+// WithContainerInfoProvider returns option to set container info provider.
+func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
+ return func(c *cfg) {
+ c.containerInfo = containerInfo
+ }
+}
+
+func WithLimiter(l qos.Limiter) Option {
+ return func(c *cfg) {
+ c.configOpsLimiter = l
+ }
+}
+
+func (s *Shard) fillInfo() {
+ s.info.MetaBaseInfo = s.metaBase.DumpInfo()
+ s.info.BlobStorInfo = s.blobStor.DumpInfo()
+ s.info.Mode = s.GetMode()
+
+ if s.useWriteCache {
+ s.info.WriteCacheInfo = s.writeCache.DumpInfo()
}
if s.pilorama != nil {
- s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
+ s.info.PiloramaInfo = s.pilorama.DumpInfo()
}
}
@@ -327,69 +404,159 @@ const (
// counter type (excludes objects that are
// stored but unavailable).
logical = "logic"
+ // user is an available small or big regular object.
+ user = "user"
)
-func (s *Shard) updateMetrics() {
- if s.cfg.metricsWriter != nil && !s.GetMode().NoMetabase() {
- cc, err := s.metaBase.ObjectCounters()
- if err != nil {
- s.log.Warn("meta: object counter read",
- zap.Error(err),
- )
-
- return
- }
-
- s.cfg.metricsWriter.SetObjectCounter(physical, cc.Phy())
- s.cfg.metricsWriter.SetObjectCounter(logical, cc.Logic())
-
- cnrList, err := s.metaBase.Containers()
- if err != nil {
- s.log.Warn("meta: can't read container list", zap.Error(err))
- return
- }
-
- var totalPayload uint64
-
- for i := range cnrList {
- size, err := s.metaBase.ContainerSize(cnrList[i])
- if err != nil {
- s.log.Warn("meta: can't read container size",
- zap.String("cid", cnrList[i].EncodeToString()),
- zap.Error(err))
- continue
- }
- s.metricsWriter.AddToContainerSize(cnrList[i].EncodeToString(), int64(size))
- totalPayload += size
- }
-
- s.metricsWriter.AddToPayloadSize(int64(totalPayload))
+func (s *Shard) updateMetrics(ctx context.Context) {
+ if s.GetMode().NoMetabase() {
+ return
}
+
+ cc, err := s.metaBase.ObjectCounters()
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
+ zap.Error(err),
+ )
+
+ return
+ }
+
+ s.setObjectCounterBy(physical, cc.Phy)
+ s.setObjectCounterBy(logical, cc.Logic)
+ s.setObjectCounterBy(user, cc.User)
+
+ cnrList, err := s.metaBase.Containers(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
+ return
+ }
+
+ var totalPayload uint64
+
+ for i := range cnrList {
+ size, err := s.metaBase.ContainerSize(cnrList[i])
+ if err != nil {
+ s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
+ zap.String("cid", cnrList[i].EncodeToString()),
+ zap.Error(err))
+ continue
+ }
+ s.addToContainerSize(cnrList[i].EncodeToString(), int64(size))
+ totalPayload += size
+ }
+
+ s.addToPayloadSize(int64(totalPayload))
+
+ contCount, err := s.metaBase.ContainerCounters(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
+ return
+ }
+ for contID, count := range contCount.Counts {
+ s.setContainerObjectsCount(contID.EncodeToString(), physical, count.Phy)
+ s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
+ s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
+ }
+ s.metricsWriter.SetMode(s.info.Mode)
}
// incObjectCounter increment both physical and logical object
// counters.
-func (s *Shard) incObjectCounter() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.IncObjectCounter(physical)
- s.cfg.metricsWriter.IncObjectCounter(logical)
+func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
+ s.metricsWriter.IncObjectCounter(physical)
+ s.metricsWriter.IncObjectCounter(logical)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+ if isUser {
+ s.metricsWriter.IncObjectCounter(user)
+ s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
+ if v > 0 {
+ s.metricsWriter.AddToObjectCounter(typ, -int(v))
+ }
+}
+
+func (s *Shard) setObjectCounterBy(typ string, v uint64) {
+ if v > 0 {
+ s.metricsWriter.SetObjectCounter(typ, v)
+ }
+}
+
+func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
+ for cnrID, count := range byCnr {
+ if count.Phy > 0 {
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
+ }
+ if count.Logic > 0 {
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
+ }
+ if count.User > 0 {
+ s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
+ }
}
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.AddToContainerSize(cnr, size)
+ if size != 0 {
+ s.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.AddToPayloadSize(size)
+ if size != 0 {
+ s.metricsWriter.AddToPayloadSize(size)
}
}
+
+func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
+ if v > 0 {
+ s.metricsWriter.SetContainerObjectsCount(cnr, typ, v)
+ }
+}
+
+func (s *Shard) SetEvacuationInProgress(val bool) {
+ s.m.Lock()
+ defer s.m.Unlock()
+ s.info.EvacuationInProgress = val
+ s.metricsWriter.SetEvacuationInProgress(val)
+}
+
+var _ qos.Limiter = &atomicOpsLimiter{}
+
+func newAtomicOpsLimiter(l qos.Limiter) *atomicOpsLimiter {
+ result := &atomicOpsLimiter{}
+ result.ptr.Store(&qosLimiterHolder{Limiter: l})
+ return result
+}
+
+type atomicOpsLimiter struct {
+ ptr atomic.Pointer[qosLimiterHolder]
+}
+
+func (a *atomicOpsLimiter) Close() {
+ a.ptr.Load().Close()
+}
+
+func (a *atomicOpsLimiter) ReadRequest(ctx context.Context) (qos.ReleaseFunc, error) {
+ return a.ptr.Load().ReadRequest(ctx)
+}
+
+func (a *atomicOpsLimiter) SetMetrics(m qos.Metrics) {
+ a.ptr.Load().SetMetrics(m)
+}
+
+func (a *atomicOpsLimiter) SetParentID(id string) {
+ a.ptr.Load().SetParentID(id)
+}
+
+func (a *atomicOpsLimiter) WriteRequest(ctx context.Context) (qos.ReleaseFunc, error) {
+ return a.ptr.Load().WriteRequest(ctx)
+}
+
+type qosLimiterHolder struct {
+ qos.Limiter
+}
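The atomicOpsLimiter introduced above puts the configured qos.Limiter behind an atomic.Pointer so that a config reload can swap the limiter in place without adding a mutex to the request path. The qosLimiterHolder indirection is needed because atomic.Pointer requires a concrete element type while qos.Limiter is an interface; embedding the interface in the holder promotes all of its methods. A minimal sketch of the same pattern, with a simplified stand-in Limiter interface rather than the repository's qos API:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
)

type ReleaseFunc func()

// Limiter is a simplified stand-in for the repository's qos.Limiter.
type Limiter interface {
	ReadRequest(context.Context) (ReleaseFunc, error)
}

// noopLimiter admits every request immediately.
type noopLimiter struct{}

func (noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
	return func() {}, nil
}

// holder gives atomic.Pointer a concrete type to point at; the
// embedded interface promotes ReadRequest onto *holder.
type holder struct{ Limiter }

type atomicLimiter struct {
	ptr atomic.Pointer[holder]
}

func newAtomicLimiter(l Limiter) *atomicLimiter {
	a := &atomicLimiter{}
	a.ptr.Store(&holder{l})
	return a
}

// Replace installs a new limiter: in-flight callers keep the limiter
// they loaded, new callers observe the replacement.
func (a *atomicLimiter) Replace(l Limiter) { a.ptr.Store(&holder{l}) }

func (a *atomicLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	return a.ptr.Load().ReadRequest(ctx)
}

func main() {
	a := newAtomicLimiter(noopLimiter{})
	release, err := a.ReadRequest(context.Background())
	fmt.Println(err) // <nil>
	release()
	a.Replace(noopLimiter{}) // e.g. after a config reload
}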
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index a6da539184..84be71c4db 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -1,148 +1,114 @@
-package shard_test
+package shard
import (
- "crypto/sha256"
- "math/rand"
+ "context"
"path/filepath"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
- "go.uber.org/zap"
- "go.uber.org/zap/zaptest"
)
-type epochState struct{}
+type epochState struct {
+ Value uint64
+}
func (s epochState) CurrentEpoch() uint64 {
- return 0
+ return s.Value
}
-func newShard(t testing.TB, enableWriteCache bool) *shard.Shard {
- return newCustomShard(t, t.TempDir(), enableWriteCache,
- nil,
- nil)
+type shardOptions struct {
+ rootPath string
+ wcOpts []writecache.Option
+ bsOpts []blobstor.Option
+ metaOptions []meta.Option
+
+ additionalShardOptions []Option
}
-func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts []writecache.Option, bsOpts []blobstor.Option) *shard.Shard {
- if enableWriteCache {
- rootPath = filepath.Join(rootPath, "wc")
- } else {
- rootPath = filepath.Join(rootPath, "nowc")
+func newShard(t testing.TB, enableWriteCache bool) *Shard {
+ return newCustomShard(t, enableWriteCache, shardOptions{})
+}
+
+func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard {
+ if o.rootPath == "" {
+ o.rootPath = t.TempDir()
}
- if bsOpts == nil {
- bsOpts = []blobstor.Option{
- blobstor.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ var sh *Shard
+ if enableWriteCache {
+ o.wcOpts = append(
+ []writecache.Option{writecache.WithPath(filepath.Join(o.rootPath, "wcache"))},
+ o.wcOpts...)
+ }
+
+ if o.bsOpts == nil {
+ o.bsOpts = []blobstor.Option{
+ blobstor.WithLogger(test.NewLogger(t)),
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
- blobovniczatree.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
+ context.Background(),
+ blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+ blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
- Policy: func(_ *object.Object, data []byte) bool {
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
return len(data) <= 1<<20
},
},
{
Storage: fstree.New(
- fstree.WithPath(filepath.Join(rootPath, "blob"))),
+ fstree.WithPath(filepath.Join(o.rootPath, "blob"))),
},
}),
}
}
- opts := []shard.Option{
- shard.WithLogger(&logger.Logger{Logger: zap.L()}),
- shard.WithBlobStorOptions(bsOpts...),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(rootPath, "meta")),
- meta.WithEpochState(epochState{}),
- ),
- shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
- shard.WithWriteCache(enableWriteCache),
- shard.WithWriteCacheOptions(
- append(
- []writecache.Option{writecache.WithPath(filepath.Join(rootPath, "wcache"))},
- wcOpts...)...,
+ opts := []Option{
+ WithID(NewIDFromBytes([]byte{})),
+ WithLogger(test.NewLogger(t)),
+ WithBlobStorOptions(o.bsOpts...),
+ WithMetaBaseOptions(
+ append([]meta.Option{
+ meta.WithPath(filepath.Join(o.rootPath, "meta")), meta.WithEpochState(epochState{}),
+ },
+ o.metaOptions...)...,
),
+ WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
+ WithWriteCache(enableWriteCache),
+ WithWriteCacheOptions(o.wcOpts),
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
+ }),
+ WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
+ sh.HandleExpiredLocks(ctx, epoch, a)
+ }),
+ WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
+ return pool
+ }),
+ WithGCRemoverSleepInterval(100 * time.Millisecond),
}
+ opts = append(opts, o.additionalShardOptions...)
- sh := shard.New(opts...)
+ sh = New(opts...)
- require.NoError(t, sh.Open())
- require.NoError(t, sh.Init())
+ require.NoError(t, sh.Open(context.Background()))
+ require.NoError(t, sh.Init(context.Background()))
return sh
}
-
-func releaseShard(s *shard.Shard, t testing.TB) {
- require.NoError(t, s.Close())
-}
-
-func generateObject(t *testing.T) *object.Object {
- return generateObjectWithCID(t, cidtest.ID())
-}
-
-func generateObjectWithCID(t *testing.T, cnr cid.ID) *object.Object {
- data := make([]byte, 32)
- rand.Read(data)
- return generateObjectWithPayload(cnr, data)
-}
-
-func generateObjectWithPayload(cnr cid.ID, data []byte) *object.Object {
- var ver version.Version
- ver.SetMajor(2)
- ver.SetMinor(1)
-
- var csum checksum.Checksum
- csum.SetSHA256(sha256.Sum256(data))
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- obj := object.New()
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cnr)
- obj.SetVersion(&ver)
- obj.SetPayload(data)
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
-
- return obj
-}
-
-func addAttribute(obj *object.Object, key, val string) {
- var attr object.Attribute
- attr.SetKey(key)
- attr.SetValue(val)
-
- attrs := obj.Attributes()
- attrs = append(attrs, attr)
- obj.SetAttributes(attrs...)
-}
-
-func addPayload(obj *object.Object, size int) {
- buf := make([]byte, size)
- _, _ = rand.Read(buf)
-
- obj.SetPayload(buf)
- obj.SetPayloadSize(uint64(size))
-}
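One detail of newCustomShard worth calling out: the deleted-lock and expired-lock callbacks close over the sh variable, which is assigned only after New(opts...) returns a few lines later. This is safe because Go closures capture the variable, not its value at creation time, and the shard invokes these callbacks only from background GC after Init. A tiny illustration of that late-binding behavior (the Shard type here is a hypothetical stand-in):

package main

import "fmt"

type Shard struct{ id string }

func (s *Shard) HandleDeletedLocks() { fmt.Println("handled by", s.id) }

func main() {
	var sh *Shard
	// The closure captures the variable sh, not its current (nil) value.
	cb := func() { sh.HandleDeletedLocks() }

	sh = &Shard{id: "shard-1"} // assignment happens after the callback is built
	cb()                       // safe now, as long as cb is not invoked earlier
}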
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index f7f7b2ca4c..b1232707f1 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -1,18 +1,22 @@
-package shard_test
+package shard
import (
- "math/rand"
+ "context"
+ "crypto/rand"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
)
func TestWriteCacheObjectLoss(t *testing.T) {
+ t.Parallel()
+
const (
smallSize = 1024
objCount = 100
@@ -27,34 +31,38 @@ func TestWriteCacheObjectLoss(t *testing.T) {
data := make([]byte, size)
rand.Read(data)
- objects[i] = generateObjectWithPayload(cidtest.ID(), data)
+ objects[i] = testutil.GenerateObjectWithCIDWithPayload(cidtest.ID(), data)
}
dir := t.TempDir()
wcOpts := []writecache.Option{
- writecache.WithSmallObjectSize(smallSize),
- writecache.WithMaxObjectSize(smallSize * 2)}
-
- sh := newCustomShard(t, dir, true, wcOpts, nil)
-
- var putPrm shard.PutPrm
-
- for i := range objects {
- putPrm.SetObject(objects[i])
- _, err := sh.Put(putPrm)
- require.NoError(t, err)
+ writecache.WithMaxObjectSize(smallSize * 2),
}
- require.NoError(t, sh.Close())
- sh = newCustomShard(t, dir, true, wcOpts, nil)
- defer releaseShard(sh, t)
+ sh := newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
- var getPrm shard.GetPrm
+ var errG errgroup.Group
+ for i := range objects {
+ obj := objects[i]
+ errG.Go(func() error {
+ var putPrm PutPrm
+ putPrm.SetObject(obj)
+ _, err := sh.Put(context.Background(), putPrm)
+ return err
+ })
+ }
+ require.NoError(t, errG.Wait())
+ require.NoError(t, sh.Close(context.Background()))
+
+ sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
+ defer func() { require.NoError(t, sh.Close(context.Background())) }()
+
+ var getPrm GetPrm
for i := range objects {
getPrm.SetAddress(object.AddressOf(objects[i]))
- _, err := sh.Get(getPrm)
+ _, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err, i)
}
}
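The rewritten TestWriteCacheObjectLoss puts all objects concurrently through an errgroup.Group, one goroutine per object, and Wait reports the first returned error. For larger workloads the same construct can bound concurrency with SetLimit; a self-contained sketch under that assumption (the job slice and prints are placeholders):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	jobs := []int{1, 2, 3, 4, 5}

	var g errgroup.Group
	g.SetLimit(2) // at most two goroutines in flight; the test itself runs unbounded
	for _, j := range jobs {
		j := j // pin the loop variable for Go versions before 1.22
		g.Go(func() error {
			fmt.Println("processing", j)
			return nil // the first non-nil error is what Wait returns
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("failed:", err)
	}
}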
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index b9f9099976..db361a8bd2 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -1,9 +1,15 @@
package shard
import (
+ "context"
+ "strconv"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
var _ pilorama.Forest = (*Shard)(nil)
@@ -12,7 +18,18 @@ var _ pilorama.Forest = (*Shard)(nil)
var ErrPiloramaDisabled = logicerr.New("pilorama is disabled")
// TreeMove implements the pilorama.Forest interface.
-func (s *Shard) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
+func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeMove",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
@@ -23,11 +40,33 @@ func (s *Shard) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Mo
if s.info.Mode.ReadOnly() {
return nil, ErrReadOnlyMode
}
- return s.pilorama.TreeMove(d, treeID, m)
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+ return s.pilorama.TreeMove(ctx, d, treeID, m)
}
// TreeAddByPath implements the pilorama.Forest interface.
-func (s *Shard) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr string, path []string, meta []pilorama.KeyValue) ([]pilorama.Move, error) {
+func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, meta []pilorama.KeyValue) ([]pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeAddByPath",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Int("meta_count", len(meta)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
@@ -38,11 +77,29 @@ func (s *Shard) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr stri
if s.info.Mode.ReadOnly() {
return nil, ErrReadOnlyMode
}
- return s.pilorama.TreeAddByPath(d, treeID, attr, path, meta)
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+ return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
}
// TreeApply implements the pilorama.Forest interface.
-func (s *Shard) TreeApply(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move, backgroundSync bool) error {
+func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApply",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.Bool("background", backgroundSync),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return ErrPiloramaDisabled
}
@@ -53,77 +110,432 @@ func (s *Shard) TreeApply(d pilorama.CIDDescriptor, treeID string, m *pilorama.M
if s.info.Mode.ReadOnly() {
return ErrReadOnlyMode
}
- return s.pilorama.TreeApply(d, treeID, m, backgroundSync)
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+ return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
+}
+
+// TreeApplyBatch implements the pilorama.Forest interface.
+func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+ return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m)
}
// TreeGetByPath implements the pilorama.Forest interface.
-func (s *Shard) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Bool("latest", latest),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
- return s.pilorama.TreeGetByPath(cid, treeID, attr, path, latest)
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+ return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
}
// TreeGetMeta implements the pilorama.Forest interface.
-func (s *Shard) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetMeta",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return pilorama.Meta{}, 0, ErrPiloramaDisabled
}
- return s.pilorama.TreeGetMeta(cid, treeID, nodeID)
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return pilorama.Meta{}, 0, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return pilorama.Meta{}, 0, err
+ }
+ defer release()
+ return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
}
// TreeGetChildren implements the pilorama.Forest interface.
-func (s *Shard) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
+func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]pilorama.NodeInfo, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetChildren",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
- return s.pilorama.TreeGetChildren(cid, treeID, nodeID)
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+ return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
+}
+
+// TreeSortedByFilename implements the pilorama.Forest interface.
+func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return nil, last, ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, last, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, last, err
+ }
+ defer release()
+ return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (s *Shard) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetOpLog",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", strconv.FormatUint(height, 10)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return pilorama.Move{}, ErrPiloramaDisabled
}
- return s.pilorama.TreeGetOpLog(cid, treeID, height)
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return pilorama.Move{}, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return pilorama.Move{}, err
+ }
+ defer release()
+ return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
}
// TreeDrop implements the pilorama.Forest interface.
-func (s *Shard) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeDrop",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return ErrPiloramaDisabled
}
- return s.pilorama.TreeDrop(cid, treeID)
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+ return s.pilorama.TreeDrop(ctx, cid, treeID)
}
// TreeList implements the pilorama.Forest interface.
-func (s *Shard) TreeList(cid cidSDK.ID) ([]string, error) {
+func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeList",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
- return s.pilorama.TreeList(cid)
-}
-// TreeExists implements the pilorama.Forest interface.
-func (s *Shard) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
- if s.pilorama == nil {
- return false, ErrPiloramaDisabled
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
}
- return s.pilorama.TreeExists(cid, treeID)
-}
-
-// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (s *Shard) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
- if s.pilorama == nil {
- return ErrPiloramaDisabled
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
}
- return s.pilorama.TreeUpdateLastSyncHeight(cid, treeID, height)
+ defer release()
+ return s.pilorama.TreeList(ctx, cid)
}
-// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (s *Shard) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeHeight",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
- return s.pilorama.TreeLastSyncHeight(cid, treeID)
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+ return s.pilorama.TreeHeight(ctx, cid, treeID)
+}
+
+// TreeExists implements the pilorama.Forest interface.
+func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeExists",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return false, ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return false, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer release()
+ return s.pilorama.TreeExists(ctx, cid, treeID)
+}
+
+// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
+func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeUpdateLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", strconv.FormatUint(height, 10)),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+ return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
+}
+
+// TreeLastSyncHeight implements the pilorama.Forest interface.
+func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return 0, ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer release()
+ return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
+}
+
+func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm) (*pilorama.TreeListTreesResult, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeListTrees",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ ),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return nil, ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+ release, err := s.opsLimiter.ReadRequest(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer release()
+ return s.pilorama.TreeListTrees(ctx, prm)
+}
+
+func (s *Shard) PiloramaEnabled() bool {
+ return s.pilorama != nil
+}
+
+func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *pilorama.Move) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyStream",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID)),
+ )
+ defer span.End()
+
+ if s.pilorama == nil {
+ return ErrPiloramaDisabled
+ }
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+ return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
}
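All Tree* methods above now follow one guard sequence: open a tracing span, check that pilorama is enabled, take the mode lock for reading, reject read-only (write paths only) and no-metabase modes, then hold a limiter token for the duration of the delegated call. Note the asymmetry: read paths such as TreeGetByPath skip the read-only check, since reads remain valid in that mode. A distilled sketch of the write-side shape, with simplified stand-in names rather than the real Shard types:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

var (
	errPiloramaDisabled = errors.New("pilorama is disabled")
	errReadOnly         = errors.New("read-only mode")
	errDegraded         = errors.New("degraded mode")
)

type mode int

const (
	readWrite mode = iota
	readOnly
	degraded
)

type limiter struct{}

func (limiter) WriteRequest(context.Context) (func(), error) { return func() {}, nil }

type shard struct {
	m        sync.RWMutex
	mode     mode
	pilorama bool
	limiter  limiter
}

// guardedWrite wraps a write operation with the common checks.
func (s *shard) guardedWrite(ctx context.Context, op func(context.Context) error) error {
	if !s.pilorama {
		return errPiloramaDisabled
	}
	s.m.RLock()
	defer s.m.RUnlock()
	switch s.mode {
	case readOnly:
		return errReadOnly
	case degraded:
		return errDegraded
	}
	release, err := s.limiter.WriteRequest(ctx)
	if err != nil {
		return err
	}
	defer release() // token held for the whole delegated call
	return op(ctx)
}

func main() {
	s := &shard{pilorama: true}
	fmt.Println(s.guardedWrite(context.Background(), func(context.Context) error { return nil }))
}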
diff --git a/pkg/local_object_storage/shard/weight.go b/pkg/local_object_storage/shard/weight.go
deleted file mode 100644
index 0ab5ad61dc..0000000000
--- a/pkg/local_object_storage/shard/weight.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package shard
-
-// WeightValues groups values of Shard weight parameters.
-type WeightValues struct {
- // Amount of free disk space. Measured in kilobytes.
- FreeSpace uint64
-}
-
-// WeightValues returns current weight values of the Shard.
-func (s *Shard) WeightValues() WeightValues {
- return s.info.WeightValues
-}
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index 7282f121ca..9edb89df8c 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -1,12 +1,31 @@
package shard
import (
+ "context"
"errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
+var (
+ dummyCancel = &writecacheSealCanceler{cancel: func() {}}
+ notInitializedCancel = &writecacheSealCanceler{cancel: func() {}}
+ errWriteCacheSealing = errors.New("writecache is already sealing or shard is not initialized")
+)
+
+type writecacheSealCanceler struct {
+ cancel context.CancelFunc
+}
+
// FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation.
type FlushWriteCachePrm struct {
ignoreErrors bool
+ seal bool
}
// SetIgnoreErrors sets the flag to ignore read-errors during flush.
@@ -14,12 +33,25 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
p.ignoreErrors = ignore
}
+// SetSeal sets the flag to leave the writecache in read-only mode after flush.
+func (p *FlushWriteCachePrm) SetSeal(v bool) {
+ p.seal = v
+}
+
// errWriteCacheDisabled is returned when an operation on write-cache is performed,
// but write-cache is disabled.
var errWriteCacheDisabled = errors.New("write-cache is disabled")
// FlushWriteCache flushes all data from the write-cache.
-func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error {
+func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.FlushWriteCache",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Bool("ignore_errors", p.ignoreErrors),
+ attribute.Bool("seal", p.seal),
+ ))
+ defer span.End()
+
if !s.hasWriteCache() {
return errWriteCacheDisabled
}
@@ -35,5 +67,95 @@ func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error {
return ErrDegradedMode
}
- return s.writeCache.Flush(p.ignoreErrors)
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
+}
+
+type SealWriteCachePrm struct {
+ IgnoreErrors bool
+ Async bool
+ RestoreMode bool
+ Shrink bool
+}
+
+// SealWriteCache flushes all data from the write-cache and moves it to degraded read-only mode.
+func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.SealWriteCache",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Bool("ignore_errors", p.IgnoreErrors),
+ attribute.Bool("restore_mode", p.RestoreMode),
+ ))
+ defer span.End()
+
+ if !s.hasWriteCache() {
+ return errWriteCacheDisabled
+ }
+
+ if p.Async {
+ ctx = context.WithoutCancel(ctx)
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ canceler := &writecacheSealCanceler{cancel: cancel}
+ if !s.writecacheSealCancel.CompareAndSwap(dummyCancel, canceler) {
+ return errWriteCacheSealing
+ }
+ s.m.RLock()
+ cleanup := func() {
+ s.m.RUnlock()
+ s.writecacheSealCancel.Store(dummyCancel)
+ }
+
+ if s.info.Mode.ReadOnly() {
+ cleanup()
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ cleanup()
+ return ErrDegradedMode
+ }
+
+ if !p.Async {
+ defer cleanup()
+ }
+ prm := writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode, Shrink: p.Shrink}
+ if p.Async {
+ started := make(chan struct{})
+ go func() {
+ close(started)
+ defer cleanup()
+
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
+ return
+ }
+ defer release()
+
+ s.log.Info(ctx, logs.StartedWritecacheSealAsync)
+ if err := s.writeCache.Seal(ctx, prm); err != nil {
+ s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
+ return
+ }
+ s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
+ }()
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-started:
+ return nil
+ }
+ }
+ release, err := s.opsLimiter.WriteRequest(ctx)
+ if err != nil {
+ return err
+ }
+ defer release()
+
+ return s.writeCache.Seal(ctx, prm)
}
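SealWriteCache admits only one seal at a time: a CompareAndSwap from the dummyCancel sentinel to a fresh canceler succeeds for exactly one caller, and concurrent callers get errWriteCacheSealing instead of blocking. In the async path the started channel additionally guarantees the goroutine is running before the method returns. A reduced model of the CAS-based single-flight guard (names simplified):

package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
)

// canceler models writecacheSealCanceler: it carries the cancel func
// so a concurrent closer could load and invoke it.
type canceler struct{ cancel context.CancelFunc }

var (
	idle    = &canceler{cancel: func() {}}
	errBusy = errors.New("seal already in progress")
)

type sealer struct {
	current atomic.Pointer[canceler]
}

func newSealer() *sealer {
	s := &sealer{}
	s.current.Store(idle)
	return s
}

func (s *sealer) Seal(ctx context.Context, work func(context.Context)) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	c := &canceler{cancel: cancel}
	// Exactly one caller wins the swap from the idle sentinel.
	if !s.current.CompareAndSwap(idle, c) {
		return errBusy
	}
	defer s.current.Store(idle) // release the slot when done
	work(ctx)
	return nil
}

func main() {
	s := newSealer()
	fmt.Println(s.Seal(context.Background(), func(context.Context) {})) // <nil>
}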
diff --git a/pkg/local_object_storage/util/ecinfo.go b/pkg/local_object_storage/util/ecinfo.go
new file mode 100644
index 0000000000..a92fbceea5
--- /dev/null
+++ b/pkg/local_object_storage/util/ecinfo.go
@@ -0,0 +1,25 @@
+package util
+
+import (
+ "bytes"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+)
+
+// MergeECInfo ignores conflicts and rewrites `to` with non-empty values
+// from `from`.
+func MergeECInfo(from, to *objectSDK.ECInfo) *objectSDK.ECInfo {
+ for _, fchunk := range from.Chunks {
+ add := true
+ for _, tchunk := range to.Chunks {
+ if bytes.Equal(tchunk.ID.GetValue(), fchunk.ID.GetValue()) {
+ add = false
+ break
+ }
+ }
+ if add {
+ to.AddChunk(*objectSDK.NewECChunkFromV2(&fchunk))
+ }
+ }
+ return to
+}
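MergeECInfo deduplicates chunks with a nested scan, which is fine for the small chunk counts EC objects carry. If the lists were ever large, a set keyed by chunk ID would make the merge linear; a hypothetical sketch of that alternative design (plain types, not the SDK's):

package main

import "fmt"

type chunk struct{ id string }

// mergeChunks appends chunks from src that are not already present in
// dst, using a map for O(n+m) membership checks.
func mergeChunks(dst, src []chunk) []chunk {
	seen := make(map[string]struct{}, len(dst))
	for _, c := range dst {
		seen[c.id] = struct{}{}
	}
	for _, c := range src {
		if _, ok := seen[c.id]; !ok {
			dst = append(dst, c)
		}
	}
	return dst
}

func main() {
	fmt.Println(mergeChunks([]chunk{{"a"}}, []chunk{{"a"}, {"b"}}))
}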
diff --git a/pkg/local_object_storage/util/ecinfo_test.go b/pkg/local_object_storage/util/ecinfo_test.go
new file mode 100644
index 0000000000..0810060880
--- /dev/null
+++ b/pkg/local_object_storage/util/ecinfo_test.go
@@ -0,0 +1,56 @@
+package util
+
+import (
+ "crypto/rand"
+ "testing"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMergeECInfo(t *testing.T) {
+ id := generateV2ID()
+ target := objectSDK.NewECInfo()
+ var chunk objectSDK.ECChunk
+ chunk.Total = 2
+ chunk.Index = 0
+ chunk.SetID(id)
+ target.AddChunk(chunk)
+
+ t.Run("merge empty", func(t *testing.T) {
+ to := objectSDK.NewECInfo()
+
+ result := MergeECInfo(target, to)
+ require.Equal(t, result, target)
+ })
+
+ t.Run("merge existed", func(t *testing.T) {
+ to := objectSDK.NewECInfo()
+ to.AddChunk(chunk)
+
+ result := MergeECInfo(target, to)
+ require.Equal(t, result, target)
+ })
+ t.Run("merge extend", func(t *testing.T) {
+ to := objectSDK.NewECInfo()
+ var chunk objectSDK.ECChunk
+ chunk.Total = 2
+ chunk.Index = 1
+ chunk.SetID(generateV2ID())
+ to.AddChunk(chunk)
+
+ result := MergeECInfo(target, to)
+ require.Equal(t, len(result.Chunks), 2)
+ })
+}
+
+func generateV2ID() oid.ID {
+ var buf [32]byte
+ _, _ = rand.Read(buf[:])
+
+ var id oid.ID
+ _ = id.Decode(buf[:])
+
+ return id
+}
diff --git a/pkg/local_object_storage/util/splitinfo.go b/pkg/local_object_storage/util/splitinfo.go
index 4a6d22268d..6ae1c3e46d 100644
--- a/pkg/local_object_storage/util/splitinfo.go
+++ b/pkg/local_object_storage/util/splitinfo.go
@@ -1,12 +1,12 @@
package util
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
// MergeSplitInfo ignores conflicts and rewrites `to` with non empty values
// from `from`.
-func MergeSplitInfo(from, to *object.SplitInfo) *object.SplitInfo {
+func MergeSplitInfo(from, to *objectSDK.SplitInfo) *objectSDK.SplitInfo {
to.SetSplitID(from.SplitID()) // overwrite SplitID and ignore conflicts
if lp, ok := from.LastPart(); ok {
diff --git a/pkg/local_object_storage/util/splitinfo_test.go b/pkg/local_object_storage/util/splitinfo_test.go
index a0626db282..0b7be5af33 100644
--- a/pkg/local_object_storage/util/splitinfo_test.go
+++ b/pkg/local_object_storage/util/splitinfo_test.go
@@ -1,11 +1,11 @@
package util_test
import (
- "math/rand"
+ "crypto/rand"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
@@ -15,7 +15,7 @@ func TestMergeSplitInfo(t *testing.T) {
uid, err := uuid.NewUUID()
require.NoError(t, err)
- splitID := object.NewSplitID()
+ splitID := objectSDK.NewSplitID()
splitID.SetUUID(uid)
var rawLinkID, rawLastID [32]byte
@@ -30,35 +30,35 @@ func TestMergeSplitInfo(t *testing.T) {
require.NoError(t, err)
lastID.SetSHA256(rawLastID)
- target := object.NewSplitInfo() // target is SplitInfo struct with all fields set
+ target := objectSDK.NewSplitInfo() // target is SplitInfo struct with all fields set
target.SetSplitID(splitID)
target.SetLastPart(lastID)
target.SetLink(linkID)
t.Run("merge empty", func(t *testing.T) {
- to := object.NewSplitInfo()
+ to := objectSDK.NewSplitInfo()
result := util.MergeSplitInfo(target, to)
require.Equal(t, result, target)
})
t.Run("merge link", func(t *testing.T) {
- from := object.NewSplitInfo()
+ from := objectSDK.NewSplitInfo()
from.SetSplitID(splitID)
from.SetLastPart(lastID)
- to := object.NewSplitInfo()
+ to := objectSDK.NewSplitInfo()
to.SetLink(linkID)
result := util.MergeSplitInfo(from, to)
require.Equal(t, result, target)
})
t.Run("merge last", func(t *testing.T) {
- from := object.NewSplitInfo()
+ from := objectSDK.NewSplitInfo()
from.SetSplitID(splitID)
from.SetLink(linkID)
- to := object.NewSplitInfo()
+ to := objectSDK.NewSplitInfo()
to.SetLastPart(lastID)
result := util.MergeSplitInfo(from, to)
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
new file mode 100644
index 0000000000..fd85b4501a
--- /dev/null
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -0,0 +1,122 @@
+package benchmark
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkWritecacheSeq(b *testing.B) {
+ const payloadSize = 8 << 10
+ b.Run("bbolt_seq", func(b *testing.B) {
+ benchmarkPutSeq(b, newCache(b), payloadSize)
+ })
+}
+
+func BenchmarkWritecachePar(b *testing.B) {
+ const payloadSize = 8 << 10
+ b.Run("bbolt_par", func(b *testing.B) {
+ benchmarkPutPar(b, newCache(b), payloadSize)
+ })
+}
+
+func BenchmarkWriteAfterDelete(b *testing.B) {
+ const payloadSize = 32 << 10
+ const parallel = 25
+
+ cache := newCache(b)
+ benchmarkPutPrepare(b, cache)
+ b.Run(fmt.Sprintf("%dB_before", payloadSize), func(b *testing.B) {
+ b.SetParallelism(parallel)
+ benchmarkRunPar(b, cache, payloadSize)
+ })
+ require.NoError(b, cache.Flush(context.Background(), false, false))
+ b.Run(fmt.Sprintf("%dB_after", payloadSize), func(b *testing.B) {
+ b.SetParallelism(parallel)
+ benchmarkRunPar(b, cache, payloadSize)
+ })
+ require.NoError(b, cache.Close(context.Background()))
+}
+
+func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
+ benchmarkPutPrepare(b, cache)
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
+
+ ctx := context.Background()
+ objGen := testutil.RandObjGenerator{ObjSize: size}
+
+ b.ResetTimer()
+ for range b.N {
+ obj := objGen.Next()
+ rawData, err := obj.Marshal()
+ require.NoError(b, err, "marshaling object")
+ prm := common.PutPrm{
+ Address: testutil.AddressFromObject(b, obj),
+ Object: obj,
+ RawData: rawData,
+ }
+ if _, err := cache.Put(ctx, prm); err != nil {
+ b.Fatalf("putting: %v", err)
+ }
+ }
+}
+
+func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
+ benchmarkPutPrepare(b, cache)
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
+
+ benchmarkRunPar(b, cache, size)
+}
+
+func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
+ ctx := context.Background()
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ objGen := testutil.RandObjGenerator{ObjSize: size}
+ for pb.Next() {
+ obj := objGen.Next()
+ rawData, err := obj.Marshal()
+ require.NoError(b, err, "marshaling object")
+ prm := common.PutPrm{
+ Address: testutil.AddressFromObject(b, obj),
+ Object: obj,
+ RawData: rawData,
+ }
+ if _, err := cache.Put(ctx, prm); err != nil {
+ b.Fatalf("putting: %v", err)
+ }
+ }
+ })
+}
+
+func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
+ require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
+ require.NoError(b, cache.Init(context.Background()), "initializing")
+}
+
+type testMetabase struct{}
+
+func (testMetabase) UpdateStorageID(context.Context, meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error) {
+ return meta.UpdateStorageIDRes{}, nil
+}
+
+func newCache(b *testing.B) writecache.Cache {
+ bs := teststore.New(
+ teststore.WithPut(func(pp common.PutPrm) (common.PutRes, error) { return common.PutRes{}, nil }),
+ )
+ return writecache.New(
+ writecache.WithPath(b.TempDir()),
+ writecache.WithBlobstor(bs),
+ writecache.WithMetabase(testMetabase{}),
+ writecache.WithMaxCacheSize(256<<30),
+ )
+}
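In the benchmarks above, b.SetParallelism(p) makes RunParallel spawn p*GOMAXPROCS goroutines, and each goroutine keeps its own RandObjGenerator so the generators are not a shared contention point. A minimal skeleton of that structure (the counter stands in for the measured operation):

package bench

import "testing"

// BenchmarkParallelSketch mirrors the helpers above: SetParallelism(p)
// runs p*GOMAXPROCS goroutines, each with its own local state.
func BenchmarkParallelSketch(b *testing.B) {
	b.SetParallelism(4)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		local := 0 // per-goroutine state, like objGen in benchmarkRunPar
		for pb.Next() {
			local++ // the measured operation goes here
		}
		_ = local
	})
}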
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
new file mode 100644
index 0000000000..ee709ea732
--- /dev/null
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -0,0 +1,144 @@
+package writecache
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+type cache struct {
+ options
+
+ mode mode.Mode
+ modeMtx sync.RWMutex
+
+ // flushCh is a channel with objects to flush.
+ flushCh chan objectInfo
+ // cancel is cancel function, protected by modeMtx in Close.
+ cancel atomic.Value
+ // wg is a wait group for flush workers.
+ wg sync.WaitGroup
+ // fsTree contains big files stored directly on file-system.
+ fsTree *fstree.FSTree
+ // counter contains atomic counters for the number of objects stored in cache.
+ counter *fstree.SimpleCounter
+}
+
+// wcStorageType is used for write-cache operations logging.
+const wcStorageType = "write-cache"
+
+type objectInfo struct {
+ addr oid.Address
+ size uint64
+}
+
+const (
+ defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
+ defaultMaxCacheSize = 1 << 30 // 1 GiB
+)
+
+var dummyCanceler context.CancelFunc = func() {}
+
+// New creates a new writecache instance.
+func New(opts ...Option) Cache {
+ c := &cache{
+ flushCh: make(chan objectInfo),
+ mode: mode.Disabled,
+ counter: fstree.NewSimpleCounter(),
+
+ options: options{
+ log: logger.NewLoggerWrapper(zap.NewNop()),
+ maxObjectSize: defaultMaxObjectSize,
+ workersCount: defaultFlushWorkersCount,
+ maxCacheSize: defaultMaxCacheSize,
+ metrics: DefaultMetrics(),
+ flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
+ qosLimiter: qos.NewNoopLimiter(),
+ },
+ }
+
+ for i := range opts {
+ opts[i](&c.options)
+ }
+
+ return c
+}
+
+// SetLogger sets the logger. It is called after the shard ID has been generated so the ID can appear in logs.
+func (c *cache) SetLogger(l *logger.Logger) {
+ c.log = l
+}
+
+func (c *cache) DumpInfo() Info {
+ return Info{
+ Path: c.path,
+ }
+}
+
+// Open opens and initializes database. Reads object counters from the ObjectCounters instance.
+func (c *cache) Open(_ context.Context, mod mode.Mode) error {
+ c.modeMtx.Lock()
+ defer c.modeMtx.Unlock()
+ c.mode = mod
+ if mod.NoMetabase() {
+ return nil
+ }
+ err := c.openStore(mode.ConvertToComponentModeDegraded(mod))
+ if err != nil {
+ return metaerr.Wrap(err)
+ }
+ c.initCounters()
+ return nil
+}
+
+// Init runs necessary services.
+func (c *cache) Init(ctx context.Context) error {
+ c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
+ if err := c.flushAndDropBBoltDB(ctx); err != nil {
+ return fmt.Errorf("flush previous version write-cache database: %w", err)
+ }
+ ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache
+ c.cancel.Store(cancel)
+ c.runFlushLoop(ctx)
+ return nil
+}
+
+// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
+func (c *cache) Close(ctx context.Context) error {
+ if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil {
+ cancelValue.(context.CancelFunc)()
+ }
+ // We cannot lock mutex for the whole operation duration
+ // because it is taken by some background workers, so `wg.Wait()` is done without modeMtx.
+ c.modeMtx.Lock()
+ c.mode = mode.DegradedReadOnly // prevent new operations from being processed
+ c.modeMtx.Unlock()
+
+ c.wg.Wait()
+
+ c.modeMtx.Lock()
+ defer c.modeMtx.Unlock()
+
+ var err error
+ if c.fsTree != nil {
+ err = c.fsTree.Close(ctx)
+ if err != nil {
+ c.fsTree = nil
+ }
+ }
+ c.metrics.Close()
+ return nil
+}
+
+func (c *cache) GetMetrics() Metrics {
+ return c.metrics
+}
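Close follows a deliberate shutdown order: swap the stored cancel func for a no-op and invoke the old one, flip the mode to DegradedReadOnly under the lock so new operations are rejected, release the lock before wg.Wait() because flush workers also take modeMtx, then re-acquire it to tear down fsTree. A self-contained sketch of that ordering with generic stand-in names:

package main

import (
	"context"
	"sync"
	"sync/atomic"
)

type component struct {
	mu     sync.Mutex
	closed bool
	cancel atomic.Value // holds a context.CancelFunc
	wg     sync.WaitGroup
}

func (c *component) worker(ctx context.Context) {
	defer c.wg.Done()
	<-ctx.Done()
	c.mu.Lock() // workers may need the lock to finish...
	c.mu.Unlock()
}

func (c *component) Close() {
	// 1. Cancel the background context exactly once.
	if cancel := c.cancel.Swap(context.CancelFunc(func() {})); cancel != nil {
		cancel.(context.CancelFunc)()
	}
	// 2. Reject new operations under the lock.
	c.mu.Lock()
	c.closed = true
	c.mu.Unlock()
	// 3. ...so Wait must run without holding the lock.
	c.wg.Wait()
	// 4. Tear down remaining resources under the lock again.
	c.mu.Lock()
	defer c.mu.Unlock()
}

func main() {
	c := &component{}
	ctx, cancel := context.WithCancel(context.Background())
	c.cancel.Store(cancel)
	c.wg.Add(1)
	go c.worker(ctx)
	c.Close()
}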
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index dcfea8dd00..94a0a40db9 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -1,59 +1,59 @@
package writecache
import (
+ "context"
+ "time"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Delete removes object from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if object is missing in write-cache.
-func (c *cache) Delete(addr oid.Address) error {
- c.modeMtx.RLock()
+// Returns ErrNotInitialized if write-cache has not been initialized yet.
+// Returns ErrDegraded if write-cache is in DEGRADED mode.
+func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Delete",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ deleted := false
+ storageType := StorageTypeUndefined
+ startedAt := time.Now()
+ defer func() {
+ c.metrics.Delete(time.Since(startedAt), deleted, storageType)
+ }()
+
+ if !c.modeMtx.TryRLock() {
+ return ErrNotInitialized
+ }
defer c.modeMtx.RUnlock()
if c.readOnly() {
return ErrReadOnly
}
-
- saddr := addr.EncodeToString()
-
- // Check disk cache.
- var has int
- _ = c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- has = len(b.Get([]byte(saddr)))
- return nil
- })
-
- if 0 < has {
- err := c.db.Update(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- err := b.Delete([]byte(saddr))
- return err
- })
- if err != nil {
- return err
- }
- storagelog.Write(c.log,
- storagelog.AddressField(saddr),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db DELETE"),
- )
- c.objCounters.DecDB()
- return nil
+ if c.noMetabase() {
+ return ErrDegraded
}
- _, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
+ storageType = StorageTypeFSTree
+ _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
- storagelog.Write(c.log,
- storagelog.AddressField(saddr),
+ storagelog.Write(ctx, c.log,
+ storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
)
- c.objCounters.DecFS()
+ deleted = true
+ // the object counter was already updated by FSTree; refresh the size estimate
+ c.estimateCacheSize()
}
-
- return err
+ return metaerr.Wrap(err)
}
diff --git a/pkg/local_object_storage/writecache/doc.go b/pkg/local_object_storage/writecache/doc.go
deleted file mode 100644
index f2e904030b..0000000000
--- a/pkg/local_object_storage/writecache/doc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Package writecache implements write-cache for objects.
-//
-// Write-cache has 2 components:
-// 1. Key-value (bbolt) database for storing small objects.
-// 2. Filesystem tree for storing big objects.
-//
-// Flushing from the writecache to the main storage is done in the background.
-// To make it possible to serve Read requests after the object was flushed,
-// we maintain an LRU cache containing addresses of all the objects that
-// could be safely deleted. The actual deletion is done during eviction from this cache.
-package writecache
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 0437367e7e..893d27ba29 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -1,260 +1,208 @@
package writecache
import (
- "bytes"
+ "context"
"errors"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
- "go.etcd.io/bbolt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
const (
- // flushBatchSize is amount of keys which will be read from cache to be flushed
- // to the main storage. It is used to reduce contention between cache put
- // and cache persist.
- flushBatchSize = 512
// defaultFlushWorkersCount is the number of workers for putting objects in main storage.
defaultFlushWorkersCount = 20
// defaultFlushInterval is the default time interval between successive flushes.
- defaultFlushInterval = time.Second
+ defaultFlushInterval = 10 * time.Second
)
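+// errIterationCompleted is a sentinel error used to stop an FSTree iteration early.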
+var errIterationCompleted = errors.New("iteration completed")
+
// runFlushLoop starts background workers which periodically flush objects to the blobstor.
-func (c *cache) runFlushLoop() {
- for i := 0; i < c.workersCount; i++ {
- c.wg.Add(1)
- go c.flushWorker(i)
+func (c *cache) runFlushLoop(ctx context.Context) {
+ if c.disableBackgroundFlush {
+ return
}
-
- c.wg.Add(1)
- go c.flushBigObjects()
-
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String())
+ fl := newFlushLimiter(c.flushSizeLimit)
c.wg.Add(1)
go func() {
defer c.wg.Done()
-
- tt := time.NewTimer(defaultFlushInterval)
- defer tt.Stop()
-
- for {
- select {
- case <-tt.C:
- c.flushDB()
- tt.Reset(defaultFlushInterval)
- case <-c.closeCh:
- return
- }
- }
+ c.pushToFlushQueue(ctx, fl)
}()
-}
-func (c *cache) flushDB() {
- var lastKey []byte
- var m []objectInfo
- for {
- select {
- case <-c.closeCh:
- return
- default:
- }
-
- m = m[:0]
-
- c.modeMtx.RLock()
- if c.readOnly() || !c.initialized.Load() {
- c.modeMtx.RUnlock()
- time.Sleep(time.Second)
- continue
- }
-
- // We put objects in batches of fixed size to not interfere with main put cycle a lot.
- _ = c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
-
- var k, v []byte
-
- if len(lastKey) == 0 {
- k, v = cs.First()
- } else {
- k, v = cs.Seek(lastKey)
- if bytes.Equal(k, lastKey) {
- k, v = cs.Next()
- }
- }
-
- for ; k != nil && len(m) < flushBatchSize; k, v = cs.Next() {
- if len(lastKey) == len(k) {
- copy(lastKey, k)
- } else {
- lastKey = slice.Copy(k)
- }
-
- m = append(m, objectInfo{
- addr: string(k),
- data: slice.Copy(v),
- })
- }
- return nil
- })
-
- var count int
- for i := range m {
- if c.flushed.Contains(m[i].addr) {
- continue
- }
-
- obj := object.New()
- if err := obj.Unmarshal(m[i].data); err != nil {
- continue
- }
-
- count++
- select {
- case c.flushCh <- obj:
- case <-c.closeCh:
- c.modeMtx.RUnlock()
- return
- }
- }
-
- if count == 0 {
- c.modeMtx.RUnlock()
- break
- }
-
- c.modeMtx.RUnlock()
-
- c.log.Debug("tried to flush items from write-cache",
- zap.Int("count", count),
- zap.String("start", base58.Encode(lastKey)))
+ for range c.workersCount {
+ c.wg.Add(1)
+ go c.workerFlush(ctx, fl)
}
}
-func (c *cache) flushBigObjects() {
- defer c.wg.Done()
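+// pushToFlushQueue periodically iterates over FSTree object info and feeds
+// addresses to the flush workers, respecting the size limiter and the QoS
+// read quota.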
+func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
+ stopf := context.AfterFunc(ctx, func() {
+ fl.close()
+ })
+ defer stopf()
- tick := time.NewTicker(defaultFlushInterval * 10)
+ tick := time.NewTicker(defaultFlushInterval)
+ defer tick.Stop()
for {
select {
case <-tick.C:
c.modeMtx.RLock()
- if c.readOnly() {
- c.modeMtx.RUnlock()
- break
- } else if !c.initialized.Load() {
+ if c.readOnly() || c.noMetabase() {
c.modeMtx.RUnlock()
continue
}
- _ = c.flushFSTree(true)
+ release, err := c.qosLimiter.ReadRequest(ctx)
+ if err != nil {
+ c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err))
+ c.modeMtx.RUnlock()
+ continue
+ }
+ err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ if err := fl.acquire(oi.DataSize); err != nil {
+ return err
+ }
+ select {
+ case c.flushCh <- objectInfo{
+ addr: oi.Address,
+ size: oi.DataSize,
+ }:
+ return nil
+ case <-ctx.Done():
+ fl.release(oi.DataSize)
+ return ctx.Err()
+ }
+ })
+ release()
+ if err != nil {
+ c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+ }
c.modeMtx.RUnlock()
- case <-c.closeCh:
+
+ // counters were already updated by FSTree; refresh the size estimate
+ c.estimateCacheSize()
+ case <-ctx.Done():
return
}
}
}
-func (c *cache) reportFlushError(msg string, addr string, err error) {
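+// workerFlush drains the flush channel until the context is canceled.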
+func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
+ defer c.wg.Done()
+
+ var objInfo objectInfo
+ for {
+ select {
+ case objInfo = <-c.flushCh:
+ c.flushIfAnObjectExistsWorker(ctx, objInfo, fl)
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
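+// flushIfAnObjectExistsWorker re-reads the object from FSTree (it may have
+// been deleted concurrently), flushes it to the blobstor and, on success,
+// removes the on-disk copy.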
+func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
+ defer fl.release(objInfo.size)
+
+ release, err := c.qosLimiter.WriteRequest(ctx)
+ if err != nil {
+ c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err))
+ return
+ }
+ defer release()
+ res, err := c.fsTree.Get(ctx, common.GetPrm{
+ Address: objInfo.addr,
+ })
+ if err != nil {
+ if !client.IsErrObjectNotFound(err) {
+ c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ }
+ return
+ }
+
+ err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
+ if err != nil {
+ // Error is handled in flushObject.
+ return
+ }
+
+ c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
+}
+
+func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
if c.reportError != nil {
- c.reportError(msg, err)
+ c.reportError(ctx, msg, err)
} else {
- c.log.Error(msg,
+ c.log.Error(ctx, msg,
zap.String("address", addr),
zap.Error(err))
}
}
-func (c *cache) flushFSTree(ignoreErrors bool) error {
+func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var prm common.IteratePrm
prm.IgnoreErrors = ignoreErrors
- prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
- sAddr := addr.EncodeToString()
+ prm.Handler = func(e common.IterationElement) error {
+ sAddr := e.Address.EncodeToString()
- if _, ok := c.store.flushed.Peek(sAddr); ok {
- return nil
- }
-
- data, err := f()
+ var obj objectSDK.Object
+ err := obj.Unmarshal(e.ObjectData)
if err != nil {
- c.reportFlushError("can't read a file", sAddr, err)
+ c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
if ignoreErrors {
return nil
}
return err
}
- var obj object.Object
- err = obj.Unmarshal(data)
+ err = c.flushObject(ctx, &obj, e.ObjectData, StorageTypeFSTree)
if err != nil {
- c.reportFlushError("can't unmarshal an object", sAddr, err)
- if ignoreErrors {
- return nil
- }
return err
}
- err = c.flushObject(&obj, data)
- if err != nil {
- if ignoreErrors {
- return nil
- }
- return err
- }
-
- // mark object as flushed
- c.flushed.Add(sAddr, false)
-
+ c.deleteFromDisk(ctx, e.Address, uint64(len(e.ObjectData)))
return nil
}
- _, err := c.fsTree.Iterate(prm)
+ _, err := c.fsTree.Iterate(ctx, prm)
return err
}
-// flushWorker writes objects to the main storage.
-func (c *cache) flushWorker(_ int) {
- defer c.wg.Done()
-
- var obj *object.Object
- for {
- // Give priority to direct put.
- select {
- case obj = <-c.flushCh:
- case <-c.closeCh:
- return
- }
-
- err := c.flushObject(obj, nil)
- if err == nil {
- c.flushed.Add(objectCore.AddressOf(obj).EncodeToString(), true)
- }
- }
-}
-
// flushObject is used to write object directly to the main storage.
-func (c *cache) flushObject(obj *object.Object, data []byte) error {
+func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st StorageType) error {
+ var err error
+
+ defer func() {
+ c.metrics.Flush(err == nil, st)
+ }()
+
addr := objectCore.AddressOf(obj)
var prm common.PutPrm
prm.Object = obj
prm.RawData = data
- res, err := c.blobstor.Put(prm)
+ res, err := c.blobstor.Put(ctx, prm)
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
- c.reportFlushError("can't flush an object to blobstor",
+ c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor,
addr.EncodeToString(), err)
}
return err
@@ -264,61 +212,43 @@ func (c *cache) flushObject(obj *object.Object, data []byte) error {
updPrm.SetAddress(addr)
updPrm.SetStorageID(res.StorageID)
- _, err = c.metabase.UpdateStorageID(updPrm)
+ _, err = c.metabase.UpdateStorageID(ctx, updPrm)
if err != nil {
- c.reportFlushError("can't update object storage ID",
+ c.reportFlushError(ctx, logs.FSTreeCantUpdateID,
addr.EncodeToString(), err)
}
return err
}
// Flush flushes all objects from the write-cache to the main storage.
-// Write-cache must be in readonly mode to ensure correctness of an operation and
-// to prevent interference with background flush workers.
-func (c *cache) Flush(ignoreErrors bool) error {
- c.modeMtx.RLock()
- defer c.modeMtx.RUnlock()
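+// If seal is true, the write-cache is switched to read-only mode once the
+// flush completes, so no new objects can be added.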
+func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
+ trace.WithAttributes(
+ attribute.Bool("ignore_errors", ignoreErrors),
+ attribute.Bool("seal", seal),
+ ))
+ defer span.End()
- return c.flush(ignoreErrors)
-}
+ c.modeMtx.Lock() // exclusive lock so as not to conflict with the background flush
+ defer c.modeMtx.Unlock()
+ if c.noMetabase() {
+ return ErrDegraded
+ }
-func (c *cache) flush(ignoreErrors bool) error {
- if err := c.flushFSTree(ignoreErrors); err != nil {
+ if err := c.flush(ctx, ignoreErrors); err != nil {
return err
}
- return c.db.View(func(tx *bbolt.Tx) error {
- var addr oid.Address
-
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
- for k, data := cs.Seek(nil); k != nil; k, data = cs.Next() {
- sa := string(k)
- if _, ok := c.flushed.Peek(sa); ok {
- continue
- }
-
- if err := addr.DecodeString(sa); err != nil {
- c.reportFlushError("can't decode object address from the DB", sa, err)
- if ignoreErrors {
- continue
- }
- return err
- }
-
- var obj object.Object
- if err := obj.Unmarshal(data); err != nil {
- c.reportFlushError("can't unmarshal an object from the DB", sa, err)
- if ignoreErrors {
- continue
- }
- return err
- }
-
- if err := c.flushObject(&obj, data); err != nil {
- return err
- }
+ if seal {
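+ // Keep the current mode bits and additionally forbid writes.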
+ m := c.mode | mode.ReadOnly
+ if err := c.setMode(ctx, m, setModePrm{ignoreErrors: ignoreErrors}); err != nil {
+ return err
}
- return nil
- })
+ c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
+ }
+ return nil
+}
+
+func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
+ return c.flushFSTree(ctx, ignoreErrors)
}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index e6de8a0282..7fc84657c0 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -1,206 +1,63 @@
package writecache
import (
+ "context"
"os"
"path/filepath"
+ "sync/atomic"
"testing"
- "time"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- versionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"github.com/stretchr/testify/require"
- "go.etcd.io/bbolt"
- "go.uber.org/atomic"
- "go.uber.org/zap/zaptest"
+ "go.uber.org/zap"
)
-type objectPair struct {
- addr oid.Address
- obj *object.Object
-}
-
func TestFlush(t *testing.T) {
- const (
- objCount = 4
- smallSize = 256
- )
+ testlogger := test.NewLogger(t)
- newCache := func(t *testing.T, opts ...Option) (Cache, *blobstor.BlobStor, *meta.DB) {
- dir := t.TempDir()
- mb := meta.New(
- meta.WithPath(filepath.Join(dir, "meta")),
- meta.WithEpochState(dummyEpoch{}))
- require.NoError(t, mb.Open(false))
- require.NoError(t, mb.Init())
-
- fsTree := fstree.New(
- fstree.WithPath(filepath.Join(dir, "blob")),
- fstree.WithDepth(0),
- fstree.WithDirNameLen(1))
- bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
- {Storage: fsTree},
- }))
- require.NoError(t, bs.Open(false))
- require.NoError(t, bs.Init())
-
- wc := New(
+ createCacheFn := func(t *testing.T, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
+ return New(
append([]Option{
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- WithPath(filepath.Join(dir, "writecache")),
- WithSmallObjectSize(smallSize),
+ WithLogger(testlogger),
+ WithPath(filepath.Join(t.TempDir(), "writecache")),
WithMetabase(mb),
WithBlobstor(bs),
+ WithDisableBackgroundFlush(),
}, opts...)...)
- require.NoError(t, wc.Open(false))
- initWC(t, wc)
-
- // First set mode for metabase and blobstor to prevent background flushes.
- require.NoError(t, mb.SetMode(mode.ReadOnly))
- require.NoError(t, bs.SetMode(mode.ReadOnly))
-
- return wc, bs, mb
}
- putObjects := func(t *testing.T, c Cache) []objectPair {
- objects := make([]objectPair, objCount)
- for i := range objects {
- objects[i] = putObject(t, c, 1+(i%2)*smallSize)
- }
- return objects
+ errCountOpt := func() (Option, *atomic.Uint32) {
+ cnt := &atomic.Uint32{}
+ return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
+ cnt.Add(1)
+ testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ }), cnt
}
- check := func(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPair) {
- for i := range objects {
- var mPrm meta.StorageIDPrm
- mPrm.SetAddress(objects[i].addr)
-
- mRes, err := mb.StorageID(mPrm)
- require.NoError(t, err)
-
- var prm common.GetPrm
- prm.Address = objects[i].addr
- prm.StorageID = mRes.StorageID()
-
- res, err := bs.Get(prm)
- require.NoError(t, err)
- require.Equal(t, objects[i].obj, res.Object)
- }
- }
-
- t.Run("no errors", func(t *testing.T) {
- wc, bs, mb := newCache(t)
- objects := putObjects(t, wc)
-
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
-
- wc.(*cache).flushed.Add(objects[0].addr.EncodeToString(), true)
- wc.(*cache).flushed.Add(objects[1].addr.EncodeToString(), false)
-
- require.NoError(t, wc.Flush(false))
-
- for i := 0; i < 2; i++ {
- var mPrm meta.GetPrm
- mPrm.SetAddress(objects[i].addr)
- _, err := mb.Get(mPrm)
- require.Error(t, err)
-
- _, err = bs.Get(common.GetPrm{Address: objects[i].addr})
- require.Error(t, err)
- }
-
- check(t, mb, bs, objects[2:])
- })
-
- t.Run("flush on moving to degraded mode", func(t *testing.T) {
- wc, bs, mb := newCache(t)
- objects := putObjects(t, wc)
-
- // Blobstor is read-only, so we expect en error from `flush` here.
- require.Error(t, wc.SetMode(mode.Degraded))
-
- // First move to read-only mode to close background workers.
- require.NoError(t, wc.SetMode(mode.ReadOnly))
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
-
- wc.(*cache).flushed.Add(objects[0].addr.EncodeToString(), true)
- wc.(*cache).flushed.Add(objects[1].addr.EncodeToString(), false)
-
- require.NoError(t, wc.SetMode(mode.Degraded))
-
- for i := 0; i < 2; i++ {
- var mPrm meta.GetPrm
- mPrm.SetAddress(objects[i].addr)
- _, err := mb.Get(mPrm)
- require.Error(t, err)
-
- _, err = bs.Get(common.GetPrm{Address: objects[i].addr})
- require.Error(t, err)
- }
-
- check(t, mb, bs, objects[2:])
- })
-
- t.Run("ignore errors", func(t *testing.T) {
- testIgnoreErrors := func(t *testing.T, f func(*cache)) {
- var errCount atomic.Uint32
- wc, bs, mb := newCache(t, WithReportErrorFunc(func(message string, err error) {
- errCount.Inc()
- }))
- objects := putObjects(t, wc)
- f(wc.(*cache))
-
- require.NoError(t, wc.SetMode(mode.ReadOnly))
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
-
- require.Equal(t, uint32(0), errCount.Load())
- require.Error(t, wc.Flush(false))
- require.True(t, errCount.Load() > 0)
- require.NoError(t, wc.Flush(true))
-
- check(t, mb, bs, objects)
- }
- t.Run("db, invalid address", func(t *testing.T) {
- testIgnoreErrors(t, func(c *cache) {
- _, data := newObject(t, 1)
- require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- return b.Put([]byte{1, 2, 3}, data)
- }))
- })
- })
- t.Run("db, invalid object", func(t *testing.T) {
- testIgnoreErrors(t, func(c *cache) {
- require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- return b.Put([]byte(oidtest.Address().EncodeToString()), []byte{1, 2, 3})
- }))
- })
- })
- t.Run("fs, read error", func(t *testing.T) {
- testIgnoreErrors(t, func(c *cache) {
- obj, data := newObject(t, 1)
+ failures := []TestFailureInjector[Option]{
+ {
+ Desc: "fs, read error",
+ InjectFn: func(t *testing.T, wc Cache) {
+ c := wc.(*cache)
+ obj := testutil.GenerateObject()
+ data, err := obj.Marshal()
+ require.NoError(t, err)
var prm common.PutPrm
prm.Address = objectCore.AddressOf(obj)
prm.RawData = data
- _, err := c.fsTree.Put(prm)
+ _, err = c.fsTree.Put(context.Background(), prm)
require.NoError(t, err)
p := prm.Address.Object().EncodeToString() + "." + prm.Address.Container().EncodeToString()
@@ -208,122 +65,178 @@ func TestFlush(t *testing.T) {
_, err = os.Stat(p) // sanity check
require.NoError(t, err)
- require.NoError(t, os.Chmod(p, 0))
- })
- })
- t.Run("fs, invalid object", func(t *testing.T) {
- testIgnoreErrors(t, func(c *cache) {
+ require.NoError(t, os.Truncate(p, 0)) // corrupt the file contents, so that it can't be unmarshalled
+ },
+ },
+ {
+ Desc: "fs, invalid object",
+ InjectFn: func(t *testing.T, wc Cache) {
+ c := wc.(*cache)
var prm common.PutPrm
prm.Address = oidtest.Address()
prm.RawData = []byte{1, 2, 3}
- _, err := c.fsTree.Put(prm)
+ _, err := c.fsTree.Put(context.Background(), prm)
require.NoError(t, err)
- })
- })
+ },
+ },
+ }
+
+ runFlushTest(t, createCacheFn, errCountOpt, failures...)
+}
+
+const (
+ objCount = 4
+ smallSize = 256
+)
+
+type CreateCacheFunc[Option any] func(
+ t *testing.T,
+ meta *meta.DB,
+ bs MainStorage,
+ opts ...Option,
+) Cache
+
+type TestFailureInjector[Option any] struct {
+ Desc string
+ InjectFn func(*testing.T, Cache)
+}
+
+type objectPair struct {
+ addr oid.Address
+ obj *objectSDK.Object
+}
+
+func runFlushTest[Option any](
+ t *testing.T,
+ createCacheFn CreateCacheFunc[Option],
+ errCountOption func() (Option, *atomic.Uint32),
+ failures ...TestFailureInjector[Option],
+) {
+ t.Run("no errors", func(t *testing.T) {
+ wc, bs, mb := newCache(t, createCacheFn)
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ objects := putObjects(t, wc)
+
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+
+ require.NoError(t, wc.Flush(context.Background(), false, false))
+
+ check(t, mb, bs, objects)
})
- t.Run("on init", func(t *testing.T) {
- wc, bs, mb := newCache(t)
- objects := []objectPair{
- // removed
- putObject(t, wc, 1),
- putObject(t, wc, smallSize+1),
- // not found
- putObject(t, wc, 1),
- putObject(t, wc, smallSize+1),
- // ok
- putObject(t, wc, 1),
- putObject(t, wc, smallSize+1),
- }
+ t.Run("flush on moving to degraded mode", func(t *testing.T) {
+ wc, bs, mb := newCache(t, createCacheFn)
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ objects := putObjects(t, wc)
- require.NoError(t, wc.Close())
- require.NoError(t, bs.SetMode(mode.ReadWrite))
- require.NoError(t, mb.SetMode(mode.ReadWrite))
+ // Blobstor is read-only, so we expect an error from `flush` here.
+ require.Error(t, wc.SetMode(context.Background(), mode.Degraded))
- for i := range objects {
- var prm meta.PutPrm
- prm.SetObject(objects[i].obj)
- _, err := mb.Put(prm)
- require.NoError(t, err)
- }
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, wc.SetMode(context.Background(), mode.Degraded))
- var inhumePrm meta.InhumePrm
- inhumePrm.SetAddresses(objects[0].addr, objects[1].addr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
- _, err := mb.Inhume(inhumePrm)
- require.NoError(t, err)
+ check(t, mb, bs, objects)
+ })
- var deletePrm meta.DeletePrm
- deletePrm.SetAddresses(objects[2].addr, objects[3].addr)
- _, err = mb.Delete(deletePrm)
- require.NoError(t, err)
+ t.Run("ignore errors", func(t *testing.T) {
+ for _, f := range failures {
+ t.Run(f.Desc, func(t *testing.T) {
+ errCountOpt, errCount := errCountOption()
+ wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
+ objects := putObjects(t, wc)
+ f.InjectFn(t, wc)
- require.NoError(t, bs.SetMode(mode.ReadOnly))
- require.NoError(t, mb.SetMode(mode.ReadOnly))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
- // Open in read-only: no error, nothing is removed.
- require.NoError(t, wc.Open(true))
- initWC(t, wc)
- for i := range objects {
- _, err := wc.Get(objects[i].addr)
- require.NoError(t, err, i)
- }
- require.NoError(t, wc.Close())
+ require.Equal(t, uint32(0), errCount.Load())
+ require.Error(t, wc.Flush(context.Background(), false, false))
+ require.Greater(t, errCount.Load(), uint32(0))
+ require.NoError(t, wc.Flush(context.Background(), true, false))
- // Open in read-write: no error, something is removed.
- require.NoError(t, wc.Open(false))
- initWC(t, wc)
- for i := range objects {
- _, err := wc.Get(objects[i].addr)
- if i < 2 {
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound), i)
- } else {
- require.NoError(t, err, i)
- }
+ check(t, mb, bs, objects)
+ })
}
})
}
+func newCache[Option any](
+ t *testing.T,
+ createCacheFn CreateCacheFunc[Option],
+ opts ...Option,
+) (Cache, *blobstor.BlobStor, *meta.DB) {
+ dir := t.TempDir()
+ mb := meta.New(
+ meta.WithPath(filepath.Join(dir, "meta")),
+ meta.WithEpochState(dummyEpoch{}))
+ require.NoError(t, mb.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, mb.Init(context.Background()))
+
+ bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
+ {
+ Storage: fstree.New(
+ fstree.WithPath(filepath.Join(dir, "blob")),
+ fstree.WithDepth(0),
+ fstree.WithDirNameLen(1)),
+ },
+ }))
+ require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, bs.Init(context.Background()))
+
+ wc := createCacheFn(t, mb, bs, opts...)
+ require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, wc.Init(context.Background()))
+
+ // First set mode for metabase and blobstor to prevent background flushes.
+ require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly))
+
+ return wc, bs, mb
+}
+
func putObject(t *testing.T, c Cache, size int) objectPair {
- obj, data := newObject(t, size)
+ obj := testutil.GenerateObjectWithSize(size)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
var prm common.PutPrm
prm.Address = objectCore.AddressOf(obj)
prm.Object = obj
prm.RawData = data
- _, err := c.Put(prm)
+ _, err = c.Put(context.Background(), prm)
require.NoError(t, err)
return objectPair{prm.Address, prm.Object}
-
}
-func newObject(t *testing.T, size int) (*object.Object, []byte) {
- obj := object.New()
- ver := versionSDK.Current()
-
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cidtest.ID())
- obj.SetType(object.TypeRegular)
- obj.SetVersion(&ver)
- obj.SetPayloadChecksum(checksumtest.Checksum())
- obj.SetPayloadHomomorphicHash(checksumtest.Checksum())
- obj.SetPayload(make([]byte, size))
-
- data, err := obj.Marshal()
- require.NoError(t, err)
- return obj, data
+func putObjects(t *testing.T, c Cache) []objectPair {
+ objects := make([]objectPair, objCount)
+ for i := range objects {
+ objects[i] = putObject(t, c, 1+(i%2)*smallSize)
+ }
+ return objects
}
-func initWC(t *testing.T, wc Cache) {
- require.NoError(t, wc.Init())
+func check(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPair) {
+ for i := range objects {
+ var mPrm meta.StorageIDPrm
+ mPrm.SetAddress(objects[i].addr)
- require.Eventually(t, func() bool {
- rawWc := wc.(*cache)
- return rawWc.initialized.Load()
- }, 100*time.Second, 1*time.Millisecond)
+ mRes, err := mb.StorageID(context.Background(), mPrm)
+ require.NoError(t, err)
+
+ var prm common.GetPrm
+ prm.Address = objects[i].addr
+ prm.StorageID = mRes.StorageID()
+
+ res, err := bs.Get(context.Background(), prm)
+ require.NoError(t, err, objects[i].addr)
+ require.Equal(t, objects[i].obj, res.Object)
+ }
}
type dummyEpoch struct{}
diff --git a/pkg/local_object_storage/writecache/generic_test.go b/pkg/local_object_storage/writecache/generic_test.go
index 53d6624b7d..2913ed13b5 100644
--- a/pkg/local_object_storage/writecache/generic_test.go
+++ b/pkg/local_object_storage/writecache/generic_test.go
@@ -1,30 +1,17 @@
package writecache
import (
- "os"
- "path/filepath"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
)
func TestGeneric(t *testing.T) {
- defer func() { _ = os.RemoveAll(t.Name()) }()
-
- var n int
- newCache := func(t *testing.T) storagetest.Component {
- n++
- dir := filepath.Join(t.Name(), strconv.Itoa(n))
- require.NoError(t, os.MkdirAll(dir, os.ModePerm))
+ storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
return New(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ WithLogger(test.NewLogger(t)),
WithFlushWorkersCount(2),
- WithPath(dir))
- }
-
- storagetest.TestAll(t, newCache)
+ WithPath(t.TempDir()))
+ })
}
diff --git a/pkg/local_object_storage/writecache/get.go b/pkg/local_object_storage/writecache/get.go
index a15f42e187..c0847a65f3 100644
--- a/pkg/local_object_storage/writecache/get.go
+++ b/pkg/local_object_storage/writecache/get.go
@@ -1,44 +1,87 @@
package writecache
import (
+ "bytes"
+ "context"
+ "time"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Get returns object from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache.
-func (c *cache) Get(addr oid.Address) (*objectSDK.Object, error) {
+func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
saddr := addr.EncodeToString()
- value, err := Get(c.db, []byte(saddr))
- if err == nil {
- obj := objectSDK.New()
- c.flushed.Get(saddr)
- return obj, obj.Unmarshal(value)
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Get",
+ trace.WithAttributes(
+ attribute.String("address", saddr),
+ ))
+ defer span.End()
+
+ if !c.modeMtx.TryRLock() {
+ return nil, ErrNotInitialized
+ }
+ defer c.modeMtx.RUnlock()
+ if c.mode.NoMetabase() {
+ return nil, ErrDegraded
}
- res, err := c.fsTree.Get(common.GetPrm{Address: addr})
+ obj, err := c.getInternal(ctx, addr)
+ return obj, metaerr.Wrap(err)
+}
+
+func (c *cache) getInternal(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ found := false
+ storageType := StorageTypeUndefined
+ startedAt := time.Now()
+ defer func() {
+ c.metrics.Get(time.Since(startedAt), found, storageType)
+ }()
+
+ res, err := c.fsTree.Get(ctx, common.GetPrm{Address: addr})
if err != nil {
- return nil, logicerr.Wrap(apistatus.ObjectNotFound{})
+ return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
- c.flushed.Get(saddr)
+ found = true
+ storageType = StorageTypeFSTree
return res.Object, nil
}
// Head returns object header from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache.
-func (c *cache) Head(addr oid.Address) (*objectSDK.Object, error) {
- obj, err := c.Get(addr)
+func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ saddr := addr.EncodeToString()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Head",
+ trace.WithAttributes(
+ attribute.String("address", saddr),
+ ))
+ defer span.End()
+
+ if !c.modeMtx.TryRLock() {
+ return nil, ErrNotInitialized
+ }
+ defer c.modeMtx.RUnlock()
+ if c.mode.NoMetabase() {
+ return nil, ErrDegraded
+ }
+
+ obj, err := c.getInternal(ctx, addr)
if err != nil {
- return nil, err
+ return nil, metaerr.Wrap(err)
}
return obj.CutPayload(), nil
@@ -49,6 +92,9 @@ func (c *cache) Head(addr oid.Address) (*objectSDK.Object, error) {
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in db.
func Get(db *bbolt.DB, key []byte) ([]byte, error) {
+ if db == nil {
+ return nil, ErrNotInitialized
+ }
var value []byte
err := db.View(func(tx *bbolt.Tx) error {
b := tx.Bucket(defaultBucket)
@@ -57,10 +103,10 @@ func Get(db *bbolt.DB, key []byte) ([]byte, error) {
}
value = b.Get(key)
if value == nil {
- return logicerr.Wrap(apistatus.ObjectNotFound{})
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
- value = slice.Copy(value)
+ value = bytes.Clone(value)
return nil
})
- return value, err
+ return value, metaerr.Wrap(err)
}
diff --git a/pkg/local_object_storage/writecache/init.go b/pkg/local_object_storage/writecache/init.go
deleted file mode 100644
index 56b27ec4b2..0000000000
--- a/pkg/local_object_storage/writecache/init.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package writecache
-
-import (
- "errors"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
- "go.uber.org/zap"
-)
-
-func (c *cache) initFlushMarks() {
- var localWG sync.WaitGroup
-
- localWG.Add(1)
- go func() {
- defer localWG.Done()
-
- c.fsTreeFlushMarkUpdate()
- }()
-
- localWG.Add(1)
- go func() {
- defer localWG.Done()
-
- c.dbFlushMarkUpdate()
- }()
-
- c.initWG.Add(1)
- c.wg.Add(1)
- go func() {
- defer c.wg.Done()
- defer c.initWG.Done()
-
- localWG.Wait()
-
- select {
- case <-c.stopInitCh:
- return
- case <-c.closeCh:
- return
- default:
- }
-
- c.initialized.Store(true)
- }()
-}
-
-var errStopIter = errors.New("stop iteration")
-
-func (c *cache) fsTreeFlushMarkUpdate() {
- c.log.Info("filling flush marks for objects in FSTree")
-
- var prm common.IteratePrm
- prm.LazyHandler = func(addr oid.Address, _ func() ([]byte, error)) error {
- select {
- case <-c.closeCh:
- return errStopIter
- case <-c.stopInitCh:
- return errStopIter
- default:
- }
-
- flushed, needRemove := c.flushStatus(addr)
- if flushed {
- c.store.flushed.Add(addr.EncodeToString(), true)
- if needRemove {
- var prm common.DeletePrm
- prm.Address = addr
-
- _, err := c.fsTree.Delete(prm)
- if err == nil {
- storagelog.Write(c.log,
- storagelog.AddressField(addr),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("fstree DELETE"),
- )
- }
- }
- }
- return nil
- }
- _, _ = c.fsTree.Iterate(prm)
- c.log.Info("finished updating FSTree flush marks")
-}
-
-func (c *cache) dbFlushMarkUpdate() {
- c.log.Info("filling flush marks for objects in database")
-
- var m []string
- var indices []int
- var lastKey []byte
- var batchSize = flushBatchSize
- for {
- select {
- case <-c.closeCh:
- return
- case <-c.stopInitCh:
- return
- default:
- }
-
- m = m[:0]
- indices = indices[:0]
-
- // We put objects in batches of fixed size to not interfere with main put cycle a lot.
- _ = c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
- for k, _ := cs.Seek(lastKey); k != nil && len(m) < batchSize; k, _ = cs.Next() {
- m = append(m, string(k))
- }
- return nil
- })
-
- var addr oid.Address
- for i := range m {
- if err := addr.DecodeString(m[i]); err != nil {
- continue
- }
-
- flushed, needRemove := c.flushStatus(addr)
- if flushed {
- c.store.flushed.Add(addr.EncodeToString(), true)
- if needRemove {
- indices = append(indices, i)
- }
- }
- }
-
- if len(m) == 0 {
- break
- }
-
- err := c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- for _, j := range indices {
- if err := b.Delete([]byte(m[j])); err != nil {
- return err
- }
- }
- return nil
- })
- if err == nil {
- for _, j := range indices {
- storagelog.Write(c.log,
- zap.String("address", m[j]),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db DELETE"),
- )
- }
- }
- lastKey = append([]byte(m[len(m)-1]), 0)
- }
-
- c.log.Info("finished updating flush marks")
-}
-
-// flushStatus returns info about the object state in the main storage.
-// First return value is true iff object exists.
-// Second return value is true iff object can be safely removed.
-func (c *cache) flushStatus(addr oid.Address) (bool, bool) {
- var existsPrm meta.ExistsPrm
- existsPrm.SetAddress(addr)
-
- _, err := c.metabase.Exists(existsPrm)
- if err != nil {
- needRemove := errors.Is(err, meta.ErrObjectIsExpired) || errors.As(err, new(apistatus.ObjectAlreadyRemoved))
- return needRemove, needRemove
- }
-
- var prm meta.StorageIDPrm
- prm.SetAddress(addr)
-
- mRes, _ := c.metabase.StorageID(prm)
- res, err := c.blobstor.Exists(common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()})
- return err == nil && res.Exists, false
-}
diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go
index 228dd25977..e369fbd505 100644
--- a/pkg/local_object_storage/writecache/iterate.go
+++ b/pkg/local_object_storage/writecache/iterate.go
@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -12,65 +12,6 @@ import (
// ErrNoDefaultBucket is returned by IterateDB when default bucket for objects is missing.
var ErrNoDefaultBucket = errors.New("no default bucket")
-// IterationPrm contains iteration parameters.
-type IterationPrm struct {
- handler func([]byte) error
- ignoreErrors bool
-}
-
-// WithHandler sets a callback to be executed on every object.
-func (p *IterationPrm) WithHandler(f func([]byte) error) {
- p.handler = f
-}
-
-// WithIgnoreErrors sets a flag indicating that errors should be ignored.
-func (p *IterationPrm) WithIgnoreErrors(ignore bool) {
- p.ignoreErrors = ignore
-}
-
-// Iterate iterates over all objects present in write cache.
-// This is very difficult to do correctly unless write-cache is put in read-only mode.
-// Thus we silently fail if shard is not in read-only mode to avoid reporting misleading results.
-func (c *cache) Iterate(prm IterationPrm) error {
- c.modeMtx.RLock()
- defer c.modeMtx.RUnlock()
- if !c.readOnly() {
- return nil
- }
-
- err := c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- return b.ForEach(func(k, data []byte) error {
- if _, ok := c.flushed.Peek(string(k)); ok {
- return nil
- }
- return prm.handler(data)
- })
- })
- if err != nil {
- return err
- }
-
- var fsPrm common.IteratePrm
- fsPrm.IgnoreErrors = prm.ignoreErrors
- fsPrm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
- if _, ok := c.flushed.Peek(addr.EncodeToString()); ok {
- return nil
- }
- data, err := f()
- if err != nil {
- if prm.ignoreErrors {
- return nil
- }
- return err
- }
- return prm.handler(data)
- }
-
- _, err = c.fsTree.Iterate(fsPrm)
- return err
-}
-
// IterateDB iterates over all objects stored in bbolt.DB instance and passes them to f until error return.
// It is assumed that db is an underlying database of some WriteCache instance.
//
@@ -78,7 +19,7 @@ func (c *cache) Iterate(prm IterationPrm) error {
//
// DB must not be nil and should be opened.
func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
- return db.View(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.View(func(tx *bbolt.Tx) error {
b := tx.Bucket(defaultBucket)
if b == nil {
return ErrNoDefaultBucket
@@ -86,13 +27,13 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
var addr oid.Address
- return b.ForEach(func(k, v []byte) error {
+ return b.ForEach(func(k, _ []byte) error {
err := addr.DecodeString(string(k))
if err != nil {
- return fmt.Errorf("could not parse object address: %w", err)
+ return fmt.Errorf("parse object address: %w", err)
}
return f(addr)
})
- })
+ }))
}
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
new file mode 100644
index 0000000000..0e020b36eb
--- /dev/null
+++ b/pkg/local_object_storage/writecache/limiter.go
@@ -0,0 +1,66 @@
+package writecache
+
+import (
+ "errors"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+)
+
+var errLimiterClosed = errors.New("acquire failed: limiter closed")
+
+// flushLimiter is used to limit the total size of objects
+// being flushed to blobstore at the same time. This is a necessary
+// limitation so that the flushing process does not have
+// a strong impact on user requests.
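+//
+// Illustrative usage (flushOne is a placeholder, not part of this change):
+//
+//	if err := fl.acquire(size); err != nil {
+//		return err // the limiter was closed
+//	}
+//	defer fl.release(size)
+//	flushOne(obj)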
+type flushLimiter struct {
+ count, size uint64
+ maxSize uint64
+ cond *sync.Cond
+ closed bool
+}
+
+func newFlushLimiter(maxSize uint64) *flushLimiter {
+ return &flushLimiter{
+ maxSize: maxSize,
+ cond: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (l *flushLimiter) acquire(size uint64) error {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ // overflowing maxSize is permitted so that objects larger than maxSize can still be flushed
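+ // (e.g., with maxSize = 10 a single object of size 15 is still admitted
+ // once no other flush holds the limiter, i.e. count == 0)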
+ for l.count > 0 && l.size+size > l.maxSize && !l.closed {
+ l.cond.Wait()
+ if l.closed {
+ return errLimiterClosed
+ }
+ }
+ l.count++
+ l.size += size
+ return nil
+}
+
+func (l *flushLimiter) release(size uint64) {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ assert.True(l.size >= size, "flushLimiter: invalid size")
+ l.size -= size
+
+ assert.True(l.count > 0, "flushLimiter: invalid count")
+ l.count--
+
+ l.cond.Broadcast()
+}
+
+func (l *flushLimiter) close() {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ l.closed = true
+
+ l.cond.Broadcast()
+}
diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go
new file mode 100644
index 0000000000..1ca3e1156c
--- /dev/null
+++ b/pkg/local_object_storage/writecache/limiter_test.go
@@ -0,0 +1,27 @@
+package writecache
+
+import (
+ "sync/atomic"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestLimiter(t *testing.T) {
+ var maxSize uint64 = 10
+ var single uint64 = 3
+ l := newFlushLimiter(maxSize)
+ var currSize atomic.Int64
+ var eg errgroup.Group
+ for range 10_000 {
+ eg.Go(func() error {
+ if err := l.acquire(single); err != nil {
+ return err
+ }
+ defer l.release(single)
+ defer currSize.Add(-1)
+ require.True(t, currSize.Add(1) <= int64(maxSize/single))
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+}
diff --git a/pkg/local_object_storage/writecache/metrics.go b/pkg/local_object_storage/writecache/metrics.go
new file mode 100644
index 0000000000..e3641f85e9
--- /dev/null
+++ b/pkg/local_object_storage/writecache/metrics.go
@@ -0,0 +1,60 @@
+package writecache
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+)
+
+type StorageType string
+
+func (t StorageType) String() string {
+ return string(t)
+}
+
+const (
+ StorageTypeUndefined StorageType = "null"
+ StorageTypeDB StorageType = "db"
+ StorageTypeFSTree StorageType = "fstree"
+)
+
+type Metrics interface {
+ SetShardID(string)
+ Get(d time.Duration, success bool, st StorageType)
+ Delete(d time.Duration, success bool, st StorageType)
+ Put(d time.Duration, success bool, st StorageType)
+ Flush(success bool, st StorageType)
+ Evict(st StorageType)
+
+ SetEstimateSize(uint64)
+ SetMode(m mode.ComponentMode)
+ SetActualCounters(uint64)
+ SetPath(path string)
+ Close()
+}
+
+func DefaultMetrics() Metrics { return metricsStub{} }
+
+type metricsStub struct{}
+
+func (metricsStub) SetShardID(string) {}
+
+func (metricsStub) SetPath(string) {}
+
+func (metricsStub) Get(time.Duration, bool, StorageType) {}
+
+func (metricsStub) Delete(time.Duration, bool, StorageType) {}
+
+func (metricsStub) Put(time.Duration, bool, StorageType) {}
+
+func (metricsStub) SetEstimateSize(uint64) {}
+
+func (metricsStub) SetMode(mode.ComponentMode) {}
+
+func (metricsStub) SetActualCounters(uint64) {}
+
+func (metricsStub) Flush(bool, StorageType) {}
+
+func (metricsStub) Evict(StorageType) {}
+
+func (metricsStub) Close() {}
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index 997310d9ee..c491be60b1 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -1,65 +1,66 @@
package writecache
import (
+ "context"
+ "errors"
"fmt"
+ "os"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
-// ErrReadOnly is returned when Put/Write is performed in a read-only mode.
-var ErrReadOnly = logicerr.New("write-cache is in read-only mode")
-
-// ErrNotInitialized is returned when write-cache is initializing.
-var ErrNotInitialized = logicerr.New("write-cache is not initialized yet")
+type setModePrm struct {
+ ignoreErrors bool
+ shrink bool
+}
// SetMode sets write-cache mode of operation.
// When shard is put in read-only mode all objects in memory are flushed to disk
// and all background jobs are suspended.
-func (c *cache) SetMode(m mode.Mode) error {
+func (c *cache) SetMode(ctx context.Context, m mode.Mode) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode",
+ trace.WithAttributes(
+ attribute.String("mode", m.String()),
+ ))
+ defer span.End()
+
c.modeMtx.Lock()
defer c.modeMtx.Unlock()
- return c.setMode(m)
+ err := c.setMode(ctx, m, setModePrm{ignoreErrors: true})
+ if err == nil {
+ c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
+ }
+ return err
}
// setMode applies new mode. Must be called with cache.modeMtx lock taken.
-func (c *cache) setMode(m mode.Mode) error {
+func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error {
var err error
turnOffMeta := m.NoMetabase()
if turnOffMeta && !c.mode.NoMetabase() {
- err = c.flush(true)
+ err = c.flush(ctx, prm.ignoreErrors)
if err != nil {
return err
}
}
- if !c.initialized.Load() {
- close(c.stopInitCh)
-
- c.initWG.Wait()
- c.stopInitCh = make(chan struct{})
-
- defer func() {
- if err == nil && !turnOffMeta {
- c.initFlushMarks()
- }
- }()
- }
-
- if c.db != nil {
- if err = c.db.Close(); err != nil {
- return fmt.Errorf("can't close write-cache database: %w", err)
- }
+ if err := c.closeStorage(ctx, prm.shrink); err != nil {
+ return err
}
// Suspend producers to ensure there are no in-flight channel send operations.
// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
// guarantees that there are no in-flight operations.
for len(c.flushCh) != 0 {
- c.log.Info("waiting for channels to flush")
+ c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush)
time.Sleep(time.Second)
}
@@ -68,7 +69,7 @@ func (c *cache) setMode(m mode.Mode) error {
return nil
}
- if err = c.openStore(m.ReadOnly()); err != nil {
+ if err = c.openStore(mode.ConvertToComponentModeDegraded(m)); err != nil {
return err
}
@@ -76,8 +77,52 @@ func (c *cache) setMode(m mode.Mode) error {
return nil
}
+func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
+ if c.fsTree == nil {
+ return nil
+ }
+ if !shrink {
+ if err := c.fsTree.Close(ctx); err != nil {
+ return fmt.Errorf("close write-cache storage: %w", err)
+ }
+ return nil
+ }
+
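+ // Probe the FSTree for a single element: the sentinel errIterationCompleted
+ // aborts the iteration on the first hit, so hitting it means "not empty".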
+ empty := true
+ _, err := c.fsTree.Iterate(ctx, common.IteratePrm{
+ Handler: func(common.IterationElement) error {
+ return errIterationCompleted
+ },
+ })
+ if err != nil {
+ if errors.Is(err, errIterationCompleted) {
+ empty = false
+ } else {
+ return fmt.Errorf("check write-cache items: %w", err)
+ }
+ }
+ if err := c.fsTree.Close(ctx); err != nil {
+ return fmt.Errorf("close write-cache storage: %w", err)
+ }
+ if empty {
+ err := os.RemoveAll(c.path)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("remove write-cache files: %w", err)
+ }
+ } else {
+ c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
+ }
+ return nil
+}
+
// readOnly returns true if current mode is read-only.
// `c.modeMtx` must be taken.
func (c *cache) readOnly() bool {
return c.mode.ReadOnly()
}
+
+// noMetabase returns true if c is operating without the metabase.
+// `c.modeMtx` must be taken.
+func (c *cache) noMetabase() bool {
+ return c.mode.NoMetabase()
+}
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
new file mode 100644
index 0000000000..4fbadbc641
--- /dev/null
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -0,0 +1,30 @@
+package writecache
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMode(t *testing.T) {
+ t.Parallel()
+ wc := New(
+ WithLogger(test.NewLogger(t)),
+ WithFlushWorkersCount(2),
+ WithPath(t.TempDir()))
+
+ require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
+ require.Nil(t, wc.(*cache).fsTree)
+ require.NoError(t, wc.Init(context.Background()))
+ require.Nil(t, wc.(*cache).fsTree)
+ require.NoError(t, wc.Close(context.Background()))
+
+ require.NoError(t, wc.Open(context.Background(), mode.Degraded))
+ require.Nil(t, wc.(*cache).fsTree)
+ require.NoError(t, wc.Init(context.Background()))
+ require.Nil(t, wc.(*cache).fsTree)
+ require.NoError(t, wc.Close(context.Background()))
+}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 6385085282..a4f98ad064 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,66 +1,51 @@
package writecache
import (
- "time"
+ "context"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.uber.org/zap"
)
// Option represents write-cache configuration option.
type Option func(*options)
-// meta is an interface for a metabase.
-type metabase interface {
- Exists(meta.ExistsPrm) (meta.ExistsRes, error)
- StorageID(meta.StorageIDPrm) (meta.StorageIDRes, error)
- UpdateStorageID(meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error)
-}
-
-// blob is an interface for the blobstor.
-type blob interface {
- Put(common.PutPrm) (common.PutRes, error)
- NeedsCompression(obj *objectSDK.Object) bool
- Exists(res common.ExistsPrm) (common.ExistsRes, error)
-}
-
type options struct {
log *logger.Logger
// path is a path to a directory for write-cache.
path string
// blobstor is the main persistent storage.
- blobstor blob
+ blobstor MainStorage
// metabase is the metabase instance.
- metabase metabase
+ metabase Metabase
// maxObjectSize is the maximum size of the object stored in the write-cache.
maxObjectSize uint64
- // smallObjectSize is the maximum size of the object stored in the database.
- smallObjectSize uint64
// workersCount is the number of workers flushing objects in parallel.
workersCount int
// maxCacheSize is the maximum total size of all objects saved in cache (DB + FS).
// 1 GiB by default.
maxCacheSize uint64
- // objCounters contains atomic counters for the number of objects stored in cache.
- objCounters counters
- // maxBatchSize is the maximum batch size for the small object database.
- maxBatchSize int
- // maxBatchDelay is the maximum batch wait time for the small object database.
- maxBatchDelay time.Duration
+ // maxCacheCount is the maximum total count of all objects saved in cache.
+ // 0 (no limit) by default.
+ maxCacheCount uint64
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
- reportError func(string, error)
+ reportError func(context.Context, string, error)
+ // metrics is the metrics implementation.
+ metrics Metrics
+ // disableBackgroundFlush is for testing purposes only.
+ disableBackgroundFlush bool
+ // flushSizeLimit is the total size limit for objects being flushed concurrently.
+ flushSizeLimit uint64
+ // qosLimiter is used to limit flush RPS.
+ qosLimiter qos.Limiter
}
// WithLogger sets logger.
func WithLogger(log *logger.Logger) Option {
return func(o *options) {
- o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
+ o.log = log
}
}
@@ -72,14 +57,14 @@ func WithPath(path string) Option {
}
// WithBlobstor sets main object storage.
-func WithBlobstor(bs *blobstor.BlobStor) Option {
+func WithBlobstor(bs MainStorage) Option {
return func(o *options) {
o.blobstor = bs
}
}
// WithMetabase sets metabase.
-func WithMetabase(db *meta.DB) Option {
+func WithMetabase(db Metabase) Option {
return func(o *options) {
o.metabase = db
}
@@ -94,15 +79,6 @@ func WithMaxObjectSize(sz uint64) Option {
}
}
-// WithSmallObjectSize sets maximum object size to be stored in write-cache.
-func WithSmallObjectSize(sz uint64) Option {
- return func(o *options) {
- if sz > 0 {
- o.smallObjectSize = sz
- }
- }
-}
-
func WithFlushWorkersCount(c int) Option {
return func(o *options) {
if c > 0 {
@@ -118,21 +94,10 @@ func WithMaxCacheSize(sz uint64) Option {
}
}
-// WithMaxBatchSize sets max batch size for the small object database.
-func WithMaxBatchSize(sz int) Option {
+// WithMaxCacheCount sets maximum write-cache objects count.
+func WithMaxCacheCount(v uint64) Option {
return func(o *options) {
- if sz > 0 {
- o.maxBatchSize = sz
- }
- }
-}
-
-// WithMaxBatchDelay sets max batch delay for the small object database.
-func WithMaxBatchDelay(d time.Duration) Option {
- return func(o *options) {
- if d > 0 {
- o.maxBatchDelay = d
- }
+ o.maxCacheCount = v
}
}
@@ -147,8 +112,35 @@ func WithNoSync(noSync bool) Option {
}
// WithReportErrorFunc sets error reporting function.
-func WithReportErrorFunc(f func(string, error)) Option {
+func WithReportErrorFunc(f func(context.Context, string, error)) Option {
return func(o *options) {
o.reportError = f
}
}
+
+// WithMetrics sets metrics implementation.
+func WithMetrics(metrics Metrics) Option {
+ return func(o *options) {
+ o.metrics = metrics
+ }
+}
+
+// WithDisableBackgroundFlush disables background flush, for testing purposes only.
+func WithDisableBackgroundFlush() Option {
+ return func(o *options) {
+ o.disableBackgroundFlush = true
+ }
+}
+
+// WithFlushSizeLimit sets flush size limit.
+func WithFlushSizeLimit(v uint64) Option {
+ return func(o *options) {
+ o.flushSizeLimit = v
+ }
+}
+
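+// WithQoSLimiter sets the QoS limiter used to throttle background flush reads and writes.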
+func WithQoSLimiter(l qos.Limiter) Option {
+ return func(o *options) {
+ o.qosLimiter = l
+ }
+}
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index 7791e93dc0..2fbf509137 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -1,18 +1,16 @@
package writecache
import (
- "errors"
+ "context"
+ "fmt"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
- "go.etcd.io/bbolt"
-)
-
-var (
- // ErrBigObject is returned when object is too big to be placed in cache.
- ErrBigObject = errors.New("too big object")
- // ErrOutOfSpace is returned when there is no space left to put a new object.
- ErrOutOfSpace = errors.New("no space left in the write cache")
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Put puts object to write-cache.
@@ -21,13 +19,30 @@ var (
// Returns ErrNotInitialized if write-cache has not been initialized yet.
// Returns ErrOutOfSpace if saving an object leads to WC's size overflow.
// Returns ErrBigObject if an object exceeds the maximum object size.
-func (c *cache) Put(prm common.PutPrm) (common.PutRes, error) {
- c.modeMtx.RLock()
+func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
+ startedAt := time.Now()
+ added := false
+ storageType := StorageTypeUndefined
+ defer func() {
+ c.metrics.Put(time.Since(startedAt), added, storageType)
+ }()
+
+ if !c.modeMtx.TryRLock() {
+ return common.PutRes{}, ErrNotInitialized
+ }
defer c.modeMtx.RUnlock()
if c.readOnly() {
return common.PutRes{}, ErrReadOnly
- } else if !c.initialized.Load() {
- return common.PutRes{}, ErrNotInitialized
+ }
+ if c.noMetabase() {
+ return common.PutRes{}, ErrDegraded
}
sz := uint64(len(prm.RawData))
@@ -35,64 +50,40 @@ func (c *cache) Put(prm common.PutPrm) (common.PutRes, error) {
return common.PutRes{}, ErrBigObject
}
- oi := objectInfo{
- addr: prm.Address.EncodeToString(),
- obj: prm.Object,
- data: prm.RawData,
- }
-
- if sz <= c.smallObjectSize {
- return common.PutRes{}, c.putSmall(oi)
- }
- return common.PutRes{}, c.putBig(oi.addr, prm)
-}
-
-// putSmall persists small objects to the write-cache database and
-// pushes the to the flush workers queue.
-func (c *cache) putSmall(obj objectInfo) error {
- cacheSize := c.estimateCacheSize()
- if c.maxCacheSize < c.incSizeDB(cacheSize) {
- return ErrOutOfSpace
- }
-
- err := c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- return b.Put([]byte(obj.addr), obj.data)
- })
+ storageType = StorageTypeFSTree
+ err := c.putBig(ctx, prm)
if err == nil {
- storagelog.Write(c.log,
- storagelog.AddressField(obj.addr),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db PUT"),
- )
- c.objCounters.IncDB()
+ added = true
}
- return nil
+ return common.PutRes{}, metaerr.Wrap(err)
}
// putBig writes object to FSTree and pushes it to the flush workers queue.
-func (c *cache) putBig(addr string, prm common.PutPrm) error {
- cacheSz := c.estimateCacheSize()
- if c.maxCacheSize < c.incSizeFS(cacheSz) {
+func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
+ if prm.RawData == nil { // defensive: RawData should already be marshalled by the shard.
+ data, err := prm.Object.Marshal()
+ if err != nil {
+ return fmt.Errorf("cannot marshal object: %w", err)
+ }
+ prm.RawData = data
+ }
+ size := uint64(len(prm.RawData))
+ if !c.hasEnoughSpace(size) {
return ErrOutOfSpace
}
- _, err := c.fsTree.Put(prm)
+ _, err := c.fsTree.Put(ctx, prm)
if err != nil {
return err
}
- if c.blobstor.NeedsCompression(prm.Object) {
- c.mtx.Lock()
- c.compressFlags[addr] = struct{}{}
- c.mtx.Unlock()
- }
- c.objCounters.IncFS()
- storagelog.Write(c.log,
- storagelog.AddressField(addr),
+ storagelog.Write(ctx, c.log,
+ storagelog.AddressField(prm.Address.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree PUT"),
)
+ // the object counter is updated by fstree; refresh the size estimate metrics
+ c.estimateCacheSize()
return nil
}
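
Note that Put now fails fast instead of blocking while a mode change holds the write lock. A minimal, self-contained sketch of that TryRLock pattern (the cache type here is a toy, not the real one):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	errNotInitialized = errors.New("write-cache is not initialized yet")
	errReadOnly       = errors.New("write-cache is in read-only mode")
)

type cache struct {
	modeMtx  sync.RWMutex
	readOnly bool
}

// Put fails fast with errNotInitialized while a mode change (which holds
// the write lock) is in progress, instead of blocking the caller.
func (c *cache) Put(data []byte) error {
	if !c.modeMtx.TryRLock() {
		return errNotInitialized
	}
	defer c.modeMtx.RUnlock()
	if c.readOnly {
		return errReadOnly
	}
	_ = data // the real code writes to FSTree here
	return nil
}

func main() {
	c := &cache{}
	fmt.Println(c.Put([]byte("obj"))) // <nil>
}
```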
diff --git a/pkg/local_object_storage/writecache/seal.go b/pkg/local_object_storage/writecache/seal.go
new file mode 100644
index 0000000000..fa224f5e0c
--- /dev/null
+++ b/pkg/local_object_storage/writecache/seal.go
@@ -0,0 +1,37 @@
+package writecache
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func (c *cache) Seal(ctx context.Context, prm SealPrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Seal",
+ trace.WithAttributes(
+ attribute.Bool("ignore_errors", prm.IgnoreErrors),
+ attribute.Bool("restore_mode", prm.RestoreMode),
+ ))
+ defer span.End()
+
+ c.modeMtx.Lock()
+ defer c.modeMtx.Unlock()
+
+ sourceMode := c.mode
+ // flush will be done by setMode
+ err := c.setMode(ctx, mode.DegradedReadOnly, setModePrm{ignoreErrors: prm.IgnoreErrors, shrink: prm.Shrink})
+ if err != nil {
+ return err
+ }
+ c.metrics.SetMode(mode.ComponentDisabled)
+ if prm.RestoreMode {
+ err = c.setMode(ctx, sourceMode, setModePrm{ignoreErrors: prm.IgnoreErrors})
+ if err == nil {
+ c.metrics.SetMode(mode.ConvertToComponentMode(sourceMode))
+ }
+ }
+ return err
+}
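
Seal degrades the cache so that setMode flushes everything, and can optionally restore the prior mode afterwards. A hypothetical call site, assuming only the Cache.Seal signature and the SealPrm fields visible in this diff:

```go
package maintenance

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
)

// sealForMaintenance flushes the cache into the main storage, shrinks it,
// and then restores the mode the cache was in before sealing.
func sealForMaintenance(ctx context.Context, wc writecache.Cache) error {
	return wc.Seal(ctx, writecache.SealPrm{
		IgnoreErrors: false, // abort on the first flush error
		RestoreMode:  true,  // return to the source mode after sealing
		Shrink:       true,
	})
}
```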
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index 1ba5a4bd3b..7a52d36728 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -1,72 +1,20 @@
package writecache
-import (
- "fmt"
-
- "go.etcd.io/bbolt"
- "go.uber.org/atomic"
-)
-
-func (c *cache) estimateCacheSize() uint64 {
- return c.objCounters.DB()*c.smallObjectSize + c.objCounters.FS()*c.maxObjectSize
+func (c *cache) estimateCacheSize() (uint64, uint64) {
+ count, size := c.counter.CountSize()
+ c.metrics.SetEstimateSize(size)
+ c.metrics.SetActualCounters(count)
+ return count, size
}
-func (c *cache) incSizeDB(sz uint64) uint64 {
- return sz + c.smallObjectSize
-}
-
-func (c *cache) incSizeFS(sz uint64) uint64 {
- return sz + c.maxObjectSize
-}
-
-type counters struct {
- cDB, cFS atomic.Uint64
-}
-
-func (x *counters) IncDB() {
- x.cDB.Inc()
-}
-
-func (x *counters) DecDB() {
- x.cDB.Dec()
-}
-
-func (x *counters) DB() uint64 {
- return x.cDB.Load()
-}
-
-func (x *counters) IncFS() {
- x.cFS.Inc()
-}
-
-func (x *counters) DecFS() {
- x.cFS.Dec()
-}
-
-func (x *counters) FS() uint64 {
- return x.cFS.Load()
-}
-
-func (c *cache) initCounters() error {
- var inDB uint64
- err := c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- if b != nil {
- inDB = uint64(b.Stats().KeyN)
- }
- return nil
- })
- if err != nil {
- return fmt.Errorf("could not read write-cache DB counter: %w", err)
+func (c *cache) hasEnoughSpace(objectSize uint64) bool {
+ count, size := c.estimateCacheSize()
+ if c.maxCacheCount > 0 && count+1 > c.maxCacheCount {
+ return false
}
-
- inFS, err := c.fsTree.NumberOfObjects()
- if err != nil {
- return fmt.Errorf("could not read write-cache FS counter: %w", err)
- }
-
- c.objCounters.cDB.Store(inDB)
- c.objCounters.cFS.Store(inFS)
-
- return nil
+ return c.maxCacheSize >= size+objectSize
+}
+
+func (c *cache) initCounters() {
+ c.estimateCacheSize()
}
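
hasEnoughSpace admits an object only if both limits hold, and a count limit of 0 disables that check. A standalone sketch of the same logic with a worked example:

```go
package main

import "fmt"

// hasEnoughSpace mirrors the admission check above: the count limit is
// optional (0 disables it), the byte-size limit always applies.
func hasEnoughSpace(count, size, objectSize, maxCount, maxSize uint64) bool {
	if maxCount > 0 && count+1 > maxCount {
		return false
	}
	return maxSize >= size+objectSize
}

func main() {
	fmt.Println(hasEnoughSpace(9, 900, 50, 10, 1000))  // true: 10th object fits
	fmt.Println(hasEnoughSpace(10, 900, 50, 10, 1000)) // false: count limit reached
	fmt.Println(hasEnoughSpace(5, 980, 50, 0, 1000))   // false: would exceed max size
}
```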
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 02c79d380e..e88566cdfc 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -1,167 +1,57 @@
package writecache
import (
- "errors"
+ "context"
"fmt"
"os"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- lru "github.com/hashicorp/golang-lru/v2"
- "github.com/hashicorp/golang-lru/v2/simplelru"
- "go.etcd.io/bbolt"
"go.uber.org/zap"
)
-// store represents persistent storage with in-memory LRU cache
-// for flushed items on top of it.
-type store struct {
- maxFlushedMarksCount int
- maxRemoveBatchSize int
-
- // flushed contains addresses of objects that were already flushed to the main storage.
- // We use LRU cache instead of map here to facilitate removing of unused object in favour of
- // frequently read ones.
- // MUST NOT be used inside bolt db transaction because it's eviction handler
- // removes untracked items from the database.
- flushed simplelru.LRUCache[string, bool]
- db *bbolt.DB
-
- dbKeysToRemove []string
- fsKeysToRemove []string
-}
-
-const dbName = "small.bolt"
-
-func (c *cache) openStore(readOnly bool) error {
+func (c *cache) openStore(mod mode.ComponentMode) error {
err := util.MkdirAllX(c.path, os.ModePerm)
if err != nil {
return err
}
- c.db, err = OpenDB(c.path, readOnly)
- if err != nil {
- return fmt.Errorf("could not open database: %w", err)
- }
-
- c.db.MaxBatchSize = c.maxBatchSize
- c.db.MaxBatchDelay = c.maxBatchDelay
-
- if !readOnly {
- err = c.db.Update(func(tx *bbolt.Tx) error {
- _, err := tx.CreateBucketIfNotExists(defaultBucket)
- return err
- })
- if err != nil {
- return fmt.Errorf("could not create default bucket: %w", err)
- }
- }
-
c.fsTree = fstree.New(
fstree.WithPath(c.path),
fstree.WithPerm(os.ModePerm),
fstree.WithDepth(1),
fstree.WithDirNameLen(1),
- fstree.WithNoSync(c.noSync))
- if err := c.fsTree.Open(readOnly); err != nil {
- return fmt.Errorf("could not open FSTree: %w", err)
+ fstree.WithNoSync(c.noSync),
+ fstree.WithFileCounter(c.counter),
+ )
+ if err := c.fsTree.Open(mod); err != nil {
+ return fmt.Errorf("open FSTree: %w", err)
}
-
- // Write-cache can be opened multiple times during `SetMode`.
- // flushed map must not be re-created in this case.
- if c.flushed == nil {
- c.flushed, _ = lru.NewWithEvict[string, bool](c.maxFlushedMarksCount, c.removeFlushed)
+ if err := c.fsTree.Init(); err != nil {
+ return fmt.Errorf("init FSTree: %w", err)
}
- c.initialized.Store(false)
-
return nil
}
-// removeFlushed removes an object from the writecache.
-// To minimize interference with the client operations, the actual removal
-// is done in batches.
-// It is not thread-safe and is used only as an evict callback to LRU cache.
-func (c *cache) removeFlushed(key string, value bool) {
- fromDatabase := value
- if fromDatabase {
- c.dbKeysToRemove = append(c.dbKeysToRemove, key)
- } else {
- c.fsKeysToRemove = append(c.fsKeysToRemove, key)
- }
-
- if len(c.dbKeysToRemove)+len(c.fsKeysToRemove) >= c.maxRemoveBatchSize {
- c.dbKeysToRemove = c.deleteFromDB(c.dbKeysToRemove)
- c.fsKeysToRemove = c.deleteFromDisk(c.fsKeysToRemove)
- }
-}
-
-func (c *cache) deleteFromDB(keys []string) []string {
- if len(keys) == 0 {
- return keys
- }
-
- var errorIndex int
- err := c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- for errorIndex = range keys {
- if err := b.Delete([]byte(keys[errorIndex])); err != nil {
- return err
- }
- }
- return nil
- })
- for i := 0; i < errorIndex; i++ {
- c.objCounters.DecDB()
- storagelog.Write(c.log,
- storagelog.AddressField(keys[i]),
+func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
+ _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
+ if err != nil && !client.IsErrObjectNotFound(err) {
+ c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
+ } else if err == nil {
+ storagelog.Write(ctx, c.log,
+ storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db DELETE"),
+ storagelog.OpField("fstree DELETE"),
)
+ c.metrics.Evict(StorageTypeFSTree)
+ // the object counter is updated by fstree; refresh the size estimate metrics
+ c.estimateCacheSize()
}
- if err != nil {
- c.log.Error("can't remove objects from the database", zap.Error(err))
- }
-
- copy(keys, keys[errorIndex:])
- return keys[:len(keys)-errorIndex]
-}
-
-func (c *cache) deleteFromDisk(keys []string) []string {
- if len(keys) == 0 {
- return keys
- }
-
- var copyIndex int
- var addr oid.Address
-
- for i := range keys {
- if err := addr.DecodeString(keys[i]); err != nil {
- c.log.Error("can't parse address", zap.String("address", keys[i]))
- continue
- }
-
- _, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
- if err != nil && !errors.As(err, new(apistatus.ObjectNotFound)) {
- c.log.Error("can't remove object from write-cache", zap.Error(err))
-
- // Save the key for the next iteration.
- keys[copyIndex] = keys[i]
- copyIndex++
- continue
- } else if err == nil {
- storagelog.Write(c.log,
- storagelog.AddressField(keys[i]),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("fstree DELETE"),
- )
- c.objCounters.DecFS()
- }
- }
-
- return keys[:copyIndex]
}
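
With bbolt gone, object accounting is delegated to FSTree through the injected file counter, and estimateCacheSize only reads it back. Below is a minimal thread-safe counter with the CountSize shape consumed above; the real fstree counter interface may differ:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// counter tracks object count and total size; CountSize matches the
// shape consumed by estimateCacheSize in the diff.
type counter struct {
	count atomic.Uint64
	size  atomic.Uint64
}

func (c *counter) Inc(size uint64) {
	c.count.Add(1)
	c.size.Add(size)
}

func (c *counter) Dec(size uint64) {
	c.count.Add(^uint64(0)) // wraparound add of -1
	c.size.Add(^(size - 1)) // wraparound add of -size
}

func (c *counter) CountSize() (uint64, uint64) {
	return c.count.Load(), c.size.Load()
}

func main() {
	var c counter
	c.Inc(100)
	c.Inc(50)
	c.Dec(50)
	fmt.Println(c.CountSize()) // 1 100
}
```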
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
new file mode 100644
index 0000000000..5eb341ba45
--- /dev/null
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -0,0 +1,110 @@
+package writecache
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+)
+
+const dbName = "small.bolt"
+
+var defaultBucket = []byte{0}
+
+func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
+ _, err := os.Stat(filepath.Join(c.path, dbName))
+ if err != nil && os.IsNotExist(err) {
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("check write-cache database existence: %w", err)
+ }
+ db, err := OpenDB(c.path, true, os.OpenFile)
+ if err != nil {
+ return fmt.Errorf("open write-cache database: %w", err)
+ }
+ defer func() {
+ _ = db.Close()
+ }()
+
+ var last string
+ for {
+ batch, err := c.readNextDBBatch(db, last)
+ if err != nil {
+ return err
+ }
+ if len(batch) == 0 {
+ break
+ }
+ for _, item := range batch {
+ var obj objectSDK.Object
+ if err := obj.Unmarshal(item.data); err != nil {
+ return fmt.Errorf("unmarshal object from database: %w", err)
+ }
+ if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
+ return fmt.Errorf("flush object from database: %w", err)
+ }
+ }
+ last = batch[len(batch)-1].address
+ }
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close write-cache database: %w", err)
+ }
+ if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
+ return fmt.Errorf("remove write-cache database: %w", err)
+ }
+ return nil
+}
+
+type batchItem struct {
+ data []byte
+ address string
+}
+
+func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
+ const batchSize = 100
+ var batch []batchItem
+ err := db.View(func(tx *bbolt.Tx) error {
+ var addr oid.Address
+
+ b := tx.Bucket(defaultBucket)
+ cs := b.Cursor()
+ for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() {
+ sa := string(k)
+ if sa == last {
+ continue
+ }
+ if err := addr.DecodeString(sa); err != nil {
+ return fmt.Errorf("decode address from database: %w", err)
+ }
+
+ batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
+ if len(batch) == batchSize {
+ return errIterationCompleted
+ }
+ }
+ return nil
+ })
+ if err == nil || errors.Is(err, errIterationCompleted) {
+ return batch, nil
+ }
+ return nil, err
+}
+
+// OpenDB opens a BoltDB instance for the write-cache. It opens in read-only mode if ro is true.
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
+ return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
+ NoFreelistSync: true,
+ ReadOnly: ro,
+ Timeout: 100 * time.Millisecond,
+ OpenFile: openFile,
+ })
+}
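
flushAndDropBBoltDB drains the legacy database in fixed-size batches, resuming each read-only View transaction from the last address seen, so no transaction stays open across flushes. The same cursor-resume loop in generic form (a sketch; it assumes the bucket exists):

```go
package migrate

import (
	"bytes"
	"errors"

	"go.etcd.io/bbolt"
)

var errBatchFull = errors.New("batch full")

// readBatch collects up to n values starting after the key `last`,
// returning the values and the new resume key.
func readBatch(db *bbolt.DB, bucket, last []byte, n int) ([][]byte, []byte, error) {
	var batch [][]byte
	err := db.View(func(tx *bbolt.Tx) error {
		cs := tx.Bucket(bucket).Cursor()
		for k, v := cs.Seek(last); k != nil; k, v = cs.Next() {
			if bytes.Equal(k, last) {
				continue // Seek lands on the resume key itself, skip it
			}
			// Clone: bbolt values are only valid inside the transaction.
			batch = append(batch, bytes.Clone(v))
			last = bytes.Clone(k)
			if len(batch) == n {
				return errBatchFull // stop early; View is read-only anyway
			}
		}
		return nil
	})
	if err != nil && !errors.Is(err, errBatchFull) {
		return nil, nil, err
	}
	return batch, last, nil
}
```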
diff --git a/pkg/local_object_storage/writecache/util.go b/pkg/local_object_storage/writecache/util.go
deleted file mode 100644
index bc6da4aa88..0000000000
--- a/pkg/local_object_storage/writecache/util.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package writecache
-
-import (
- "os"
- "path/filepath"
- "time"
-
- "go.etcd.io/bbolt"
-)
-
-// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool) (*bbolt.DB, error) {
- return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
- NoFreelistSync: true,
- ReadOnly: ro,
- Timeout: 100 * time.Millisecond,
- })
-}
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index 2fe7d44bc1..7ed5113189 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -1,17 +1,17 @@
package writecache
import (
- "sync"
+ "context"
+ "errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
- "go.uber.org/atomic"
- "go.uber.org/zap"
)
// Info groups the information about write-cache.
@@ -20,166 +20,57 @@ type Info struct {
Path string
}
+type SealPrm struct {
+ IgnoreErrors bool
+ RestoreMode bool
+ Shrink bool
+}
+
// Cache represents write-cache for objects.
type Cache interface {
- Get(address oid.Address) (*object.Object, error)
- Head(oid.Address) (*object.Object, error)
+ Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error)
+ Head(context.Context, oid.Address) (*objectSDK.Object, error)
// Delete removes object referenced by the given oid.Address from the
// Cache. Returns any error encountered that prevented the object to be
// removed.
//
// Returns apistatus.ObjectNotFound if object is missing in the Cache.
// Returns ErrReadOnly if the Cache is currently in the read-only mode.
- Delete(oid.Address) error
- Iterate(IterationPrm) error
- Put(common.PutPrm) (common.PutRes, error)
- SetMode(mode.Mode) error
+ Delete(context.Context, oid.Address) error
+ Put(context.Context, common.PutPrm) (common.PutRes, error)
+ SetMode(context.Context, mode.Mode) error
SetLogger(*logger.Logger)
DumpInfo() Info
- Flush(bool) error
+ Flush(context.Context, bool, bool) error
+ Seal(context.Context, SealPrm) error
- Init() error
- Open(readOnly bool) error
- Close() error
+ Init(context.Context) error
+ Open(ctx context.Context, mode mode.Mode) error
+ Close(context.Context) error
+ GetMetrics() Metrics
}
-type cache struct {
- options
-
- // mtx protects statistics, counters and compressFlags.
- mtx sync.RWMutex
-
- mode mode.Mode
- initialized atomic.Bool
- stopInitCh chan struct{} // used to sync initWG initialisation routines and _only_ them
- initWG sync.WaitGroup // for initialisation routines only
- modeMtx sync.RWMutex
-
- // compressFlags maps address of a big object to boolean value indicating
- // whether object should be compressed.
- compressFlags map[string]struct{}
-
- // flushCh is a channel with objects to flush.
- flushCh chan *object.Object
- // closeCh is close channel, protected by modeMtx.
- closeCh chan struct{}
- // wg is a wait group for flush workers.
- wg sync.WaitGroup
- // store contains underlying database.
- store
- // fsTree contains big files stored directly on file-system.
- fsTree *fstree.FSTree
+// MainStorage is the interface of the underlying storage of Cache implementations.
+type MainStorage interface {
+ Compressor() *compression.Compressor
+ Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error)
+ Put(context.Context, common.PutPrm) (common.PutRes, error)
}
-// wcStorageType is used for write-cache operations logging.
-const wcStorageType = "write-cache"
-
-type objectInfo struct {
- addr string
- data []byte
- obj *object.Object
+// Metabase is the interface of the metabase used by Cache implementations.
+type Metabase interface {
+ UpdateStorageID(context.Context, meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error)
}
-const (
- defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
- defaultSmallObjectSize = 32 * 1024 // 32 KiB
- defaultMaxCacheSize = 1 << 30 // 1 GiB
-)
-
var (
- defaultBucket = []byte{0}
+ // ErrReadOnly is returned when Put/Write is performed in a read-only mode.
+ ErrReadOnly = logicerr.New("write-cache is in read-only mode")
+ // ErrDegraded is returned when the write-cache is in degraded mode.
+ ErrDegraded = logicerr.New("write-cache is in degraded mode")
+ // ErrNotInitialized is returned when the write-cache is not initialized yet.
+ ErrNotInitialized = logicerr.New("write-cache is not initialized yet")
+ // ErrBigObject is returned when an object is too big to be placed in the cache.
+ ErrBigObject = errors.New("too big object")
+ // ErrOutOfSpace is returned when there is no space left to put a new object.
+ ErrOutOfSpace = errors.New("no space left in the write cache")
)
-
-// New creates new writecache instance.
-func New(opts ...Option) Cache {
- c := &cache{
- flushCh: make(chan *object.Object),
- mode: mode.ReadWrite,
- stopInitCh: make(chan struct{}),
-
- compressFlags: make(map[string]struct{}),
- options: options{
- log: &logger.Logger{Logger: zap.NewNop()},
- maxObjectSize: defaultMaxObjectSize,
- smallObjectSize: defaultSmallObjectSize,
- workersCount: defaultFlushWorkersCount,
- maxCacheSize: defaultMaxCacheSize,
- maxBatchSize: bbolt.DefaultMaxBatchSize,
- maxBatchDelay: bbolt.DefaultMaxBatchDelay,
- },
- }
-
- for i := range opts {
- opts[i](&c.options)
- }
-
- // Make the LRU cache contain which take approximately 3/4 of the maximum space.
- // Assume small and big objects are stored in 50-50 proportion.
- c.maxFlushedMarksCount = int(c.maxCacheSize/c.maxObjectSize+c.maxCacheSize/c.smallObjectSize) / 2 * 3 / 4
- // Trigger the removal when the cache is 7/8 full, so that new items can still arrive.
- c.maxRemoveBatchSize = c.maxFlushedMarksCount / 8
-
- return c
-}
-
-// SetLogger sets logger. It is used after the shard ID was generated to use it in logs.
-func (c *cache) SetLogger(l *logger.Logger) {
- c.log = l
-}
-
-func (c *cache) DumpInfo() Info {
- return Info{
- Path: c.path,
- }
-}
-
-// Open opens and initializes database. Reads object counters from the ObjectCounters instance.
-func (c *cache) Open(readOnly bool) error {
- err := c.openStore(readOnly)
- if err != nil {
- return err
- }
-
- // Opening after Close is done during maintenance mode,
- // thus we need to create a channel here.
- c.closeCh = make(chan struct{})
-
- return c.initCounters()
-}
-
-// Init runs necessary services.
-func (c *cache) Init() error {
- c.initFlushMarks()
- c.runFlushLoop()
- return nil
-}
-
-// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
-func (c *cache) Close() error {
- c.modeMtx.Lock()
- defer c.modeMtx.Unlock()
-
- // Finish all in-progress operations.
- if err := c.setMode(mode.ReadOnly); err != nil {
- return err
- }
-
- if c.closeCh != nil {
- close(c.closeCh)
- }
- c.wg.Wait()
- if c.closeCh != nil {
- c.closeCh = nil
- }
-
- c.initialized.Store(false)
-
- var err error
- if c.db != nil {
- err = c.db.Close()
- if err != nil {
- c.db = nil
- }
- }
- return nil
-}
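
Narrowing the blobstor and metabase dependencies to the MainStorage and Metabase interfaces makes the write-cache mockable in tests. A hypothetical in-memory MainStorage stub is sketched below; the ExistsPrm/ExistsRes field names are assumptions, since they do not appear in this diff:

```go
package writecachetest

import (
	"context"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
)

// mainStorageStub keeps objects in memory, keyed by encoded address.
type mainStorageStub struct {
	mtx  sync.Mutex
	objs map[string][]byte
	c    compression.Compressor
}

func (s *mainStorageStub) Compressor() *compression.Compressor { return &s.c }

func (s *mainStorageStub) Put(_ context.Context, prm common.PutPrm) (common.PutRes, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.objs == nil {
		s.objs = make(map[string][]byte)
	}
	s.objs[prm.Address.EncodeToString()] = prm.RawData
	return common.PutRes{}, nil
}

// Exists reports whether the object was stored; field names here are
// assumed, as noted above.
func (s *mainStorageStub) Exists(_ context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	_, ok := s.objs[prm.Address.EncodeToString()]
	return common.ExistsRes{Exists: ok}, nil
}
```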
diff --git a/pkg/metrics/engine.go b/pkg/metrics/engine.go
deleted file mode 100644
index 2696d4a84c..0000000000
--- a/pkg/metrics/engine.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package metrics
-
-import (
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type (
- engineMetrics struct {
- listContainersDuration prometheus.Counter
- estimateContainerSizeDuration prometheus.Counter
- deleteDuration prometheus.Counter
- existsDuration prometheus.Counter
- getDuration prometheus.Counter
- headDuration prometheus.Counter
- inhumeDuration prometheus.Counter
- putDuration prometheus.Counter
- rangeDuration prometheus.Counter
- searchDuration prometheus.Counter
- listObjectsDuration prometheus.Counter
- containerSize prometheus.GaugeVec
- payloadSize prometheus.GaugeVec
- }
-)
-
-const engineSubsystem = "engine"
-
-func newEngineMetrics() engineMetrics {
- var (
- listContainersDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "list_containers_duration",
- Help: "Accumulated duration of engine list containers operations",
- })
-
- estimateContainerSizeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "estimate_container_size_duration",
- Help: "Accumulated duration of engine container size estimate operations",
- })
-
- deleteDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "delete_duration",
- Help: "Accumulated duration of engine delete operations",
- })
-
- existsDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "exists_duration",
- Help: "Accumulated duration of engine exists operations",
- })
-
- getDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "get_duration",
- Help: "Accumulated duration of engine get operations",
- })
-
- headDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "head_duration",
- Help: "Accumulated duration of engine head operations",
- })
-
- inhumeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "inhume_duration",
- Help: "Accumulated duration of engine inhume operations",
- })
-
- putDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "put_duration",
- Help: "Accumulated duration of engine put operations",
- })
-
- rangeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "range_duration",
- Help: "Accumulated duration of engine range operations",
- })
-
- searchDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "search_duration",
- Help: "Accumulated duration of engine search operations",
- })
-
- listObjectsDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "list_objects_duration",
- Help: "Accumulated duration of engine list objects operations",
- })
-
- containerSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "container_size",
- Help: "Accumulated size of all objects in a container",
- }, []string{containerIDLabelKey})
-
- payloadSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "payload_size",
- Help: "Accumulated size of all objects in a shard",
- }, []string{shardIDLabelKey})
- )
-
- return engineMetrics{
- listContainersDuration: listContainersDuration,
- estimateContainerSizeDuration: estimateContainerSizeDuration,
- deleteDuration: deleteDuration,
- existsDuration: existsDuration,
- getDuration: getDuration,
- headDuration: headDuration,
- inhumeDuration: inhumeDuration,
- putDuration: putDuration,
- rangeDuration: rangeDuration,
- searchDuration: searchDuration,
- listObjectsDuration: listObjectsDuration,
- containerSize: *containerSize,
- payloadSize: *payloadSize,
- }
-}
-
-func (m engineMetrics) register() {
- prometheus.MustRegister(m.listContainersDuration)
- prometheus.MustRegister(m.estimateContainerSizeDuration)
- prometheus.MustRegister(m.deleteDuration)
- prometheus.MustRegister(m.existsDuration)
- prometheus.MustRegister(m.getDuration)
- prometheus.MustRegister(m.headDuration)
- prometheus.MustRegister(m.inhumeDuration)
- prometheus.MustRegister(m.putDuration)
- prometheus.MustRegister(m.rangeDuration)
- prometheus.MustRegister(m.searchDuration)
- prometheus.MustRegister(m.listObjectsDuration)
- prometheus.MustRegister(m.containerSize)
- prometheus.MustRegister(m.payloadSize)
-}
-
-func (m engineMetrics) AddListContainersDuration(d time.Duration) {
- m.listObjectsDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddEstimateContainerSizeDuration(d time.Duration) {
- m.estimateContainerSizeDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddDeleteDuration(d time.Duration) {
- m.deleteDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddExistsDuration(d time.Duration) {
- m.existsDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddGetDuration(d time.Duration) {
- m.getDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddHeadDuration(d time.Duration) {
- m.headDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddInhumeDuration(d time.Duration) {
- m.inhumeDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddPutDuration(d time.Duration) {
- m.putDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddRangeDuration(d time.Duration) {
- m.rangeDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddSearchDuration(d time.Duration) {
- m.searchDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddListObjectsDuration(d time.Duration) {
- m.listObjectsDuration.Add(float64(d))
-}
-
-func (m engineMetrics) AddToContainerSize(cnrID string, size int64) {
- m.containerSize.With(prometheus.Labels{containerIDLabelKey: cnrID}).Add(float64(size))
-}
-
-func (m engineMetrics) AddToPayloadCounter(shardID string, size int64) {
- m.payloadSize.With(prometheus.Labels{shardIDLabelKey: shardID}).Add(float64(size))
-}
diff --git a/pkg/metrics/innerring.go b/pkg/metrics/innerring.go
deleted file mode 100644
index 55b0aa0894..0000000000
--- a/pkg/metrics/innerring.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-const innerRingSubsystem = "ir"
-
-// InnerRingServiceMetrics contains metrics collected by inner ring.
-type InnerRingServiceMetrics struct {
- epoch prometheus.Gauge
- health prometheus.Gauge
-}
-
-// NewInnerRingMetrics returns new instance of metrics collectors for inner ring.
-func NewInnerRingMetrics() InnerRingServiceMetrics {
- var (
- epoch = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: innerRingSubsystem,
- Name: "epoch",
- Help: "Current epoch as seen by inner-ring node.",
- })
- health = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: innerRingSubsystem,
- Name: "health",
- Help: "Current inner-ring node state.",
- })
- )
-
- prometheus.MustRegister(epoch)
- prometheus.MustRegister(health)
-
- return InnerRingServiceMetrics{
- epoch: epoch,
- health: health,
- }
-}
-
-// SetEpoch updates epoch metrics.
-func (m InnerRingServiceMetrics) SetEpoch(epoch uint64) {
- m.epoch.Set(float64(epoch))
-}
-
-// SetHealth updates health metrics.
-func (m InnerRingServiceMetrics) SetHealth(s int32) {
- m.health.Set(float64(s))
-}
diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go
deleted file mode 100644
index 640da2d5f3..0000000000
--- a/pkg/metrics/metrics.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-const namespace = "frostfs_node"
-
-type NodeMetrics struct {
- objectServiceMetrics
- engineMetrics
- stateMetrics
- epoch prometheus.Gauge
-}
-
-func NewNodeMetrics() *NodeMetrics {
- objectService := newObjectServiceMetrics()
- objectService.register()
-
- engine := newEngineMetrics()
- engine.register()
-
- state := newStateMetrics()
- state.register()
-
- epoch := prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: innerRingSubsystem,
- Name: "epoch",
- Help: "Current epoch as seen by inner-ring node.",
- })
- prometheus.MustRegister(epoch)
-
- return &NodeMetrics{
- objectServiceMetrics: objectService,
- engineMetrics: engine,
- stateMetrics: state,
- epoch: epoch,
- }
-}
-
-// SetEpoch updates epoch metric.
-func (m *NodeMetrics) SetEpoch(epoch uint64) {
- m.epoch.Set(float64(epoch))
-}
diff --git a/pkg/metrics/object.go b/pkg/metrics/object.go
deleted file mode 100644
index 0bb16b7801..0000000000
--- a/pkg/metrics/object.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-const objectSubsystem = "object"
-
-type (
- methodCount struct {
- success prometheus.Counter
- total prometheus.Counter
- }
-
- objectServiceMetrics struct {
- getCounter methodCount
- putCounter methodCount
- headCounter methodCount
- searchCounter methodCount
- deleteCounter methodCount
- rangeCounter methodCount
- rangeHashCounter methodCount
-
- getDuration prometheus.Counter
- putDuration prometheus.Counter
- headDuration prometheus.Counter
- searchDuration prometheus.Counter
- deleteDuration prometheus.Counter
- rangeDuration prometheus.Counter
- rangeHashDuration prometheus.Counter
-
- putPayload prometheus.Counter
- getPayload prometheus.Counter
-
- shardMetrics *prometheus.GaugeVec
- shardsReadonly *prometheus.GaugeVec
- }
-)
-
-const (
- shardIDLabelKey = "shard"
- counterTypeLabelKey = "type"
- containerIDLabelKey = "cid"
-)
-
-func newMethodCallCounter(name string) methodCount {
- return methodCount{
- success: prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: fmt.Sprintf("%s_req_count_success", name),
- Help: fmt.Sprintf("The number of successful %s requests processed", name),
- }),
- total: prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: fmt.Sprintf("%s_req_count", name),
- Help: fmt.Sprintf("Total number of %s requests processed", name),
- }),
- }
-}
-
-func (m methodCount) mustRegister() {
- prometheus.MustRegister(m.success)
- prometheus.MustRegister(m.total)
-}
-
-func (m methodCount) Inc(success bool) {
- m.total.Inc()
- if success {
- m.success.Inc()
- }
-}
-
-func newObjectServiceMetrics() objectServiceMetrics {
- var ( // Request counter metrics.
- getCounter = newMethodCallCounter("get")
- putCounter = newMethodCallCounter("put")
- headCounter = newMethodCallCounter("head")
- searchCounter = newMethodCallCounter("search")
- deleteCounter = newMethodCallCounter("delete")
- rangeCounter = newMethodCallCounter("range")
- rangeHashCounter = newMethodCallCounter("range_hash")
- )
-
- var ( // Request duration metrics.
- getDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "get_req_duration",
- Help: "Accumulated get request process duration",
- })
-
- putDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "put_req_duration",
- Help: "Accumulated put request process duration",
- })
-
- headDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "head_req_duration",
- Help: "Accumulated head request process duration",
- })
-
- searchDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "search_req_duration",
- Help: "Accumulated search request process duration",
- })
-
- deleteDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "delete_req_duration",
- Help: "Accumulated delete request process duration",
- })
-
- rangeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "range_req_duration",
- Help: "Accumulated range request process duration",
- })
-
- rangeHashDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "range_hash_req_duration",
- Help: "Accumulated range hash request process duration",
- })
- )
-
- var ( // Object payload metrics.
- putPayload = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "put_payload",
- Help: "Accumulated payload size at object put method",
- })
-
- getPayload = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "get_payload",
- Help: "Accumulated payload size at object get method",
- })
-
- shardsMetrics = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "counter",
- Help: "Objects counters per shards",
- },
- []string{shardIDLabelKey, counterTypeLabelKey},
- )
-
- shardsReadonly = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "readonly",
- Help: "Shard state",
- },
- []string{shardIDLabelKey},
- )
- )
-
- return objectServiceMetrics{
- getCounter: getCounter,
- putCounter: putCounter,
- headCounter: headCounter,
- searchCounter: searchCounter,
- deleteCounter: deleteCounter,
- rangeCounter: rangeCounter,
- rangeHashCounter: rangeHashCounter,
- getDuration: getDuration,
- putDuration: putDuration,
- headDuration: headDuration,
- searchDuration: searchDuration,
- deleteDuration: deleteDuration,
- rangeDuration: rangeDuration,
- rangeHashDuration: rangeHashDuration,
- putPayload: putPayload,
- getPayload: getPayload,
- shardMetrics: shardsMetrics,
- shardsReadonly: shardsReadonly,
- }
-}
-
-func (m objectServiceMetrics) register() {
- m.getCounter.mustRegister()
- m.putCounter.mustRegister()
- m.headCounter.mustRegister()
- m.searchCounter.mustRegister()
- m.deleteCounter.mustRegister()
- m.rangeCounter.mustRegister()
- m.rangeHashCounter.mustRegister()
-
- prometheus.MustRegister(m.getDuration)
- prometheus.MustRegister(m.putDuration)
- prometheus.MustRegister(m.headDuration)
- prometheus.MustRegister(m.searchDuration)
- prometheus.MustRegister(m.deleteDuration)
- prometheus.MustRegister(m.rangeDuration)
- prometheus.MustRegister(m.rangeHashDuration)
-
- prometheus.MustRegister(m.putPayload)
- prometheus.MustRegister(m.getPayload)
-
- prometheus.MustRegister(m.shardMetrics)
- prometheus.MustRegister(m.shardsReadonly)
-}
-
-func (m objectServiceMetrics) IncGetReqCounter(success bool) {
- m.getCounter.Inc(success)
-}
-
-func (m objectServiceMetrics) IncPutReqCounter(success bool) {
- m.putCounter.Inc(success)
-}
-
-func (m objectServiceMetrics) IncHeadReqCounter(success bool) {
- m.headCounter.Inc(success)
-}
-
-func (m objectServiceMetrics) IncSearchReqCounter(success bool) {
- m.searchCounter.Inc(success)
-}
-
-func (m objectServiceMetrics) IncDeleteReqCounter(success bool) {
- m.deleteCounter.Inc(success)
-}
-
-func (m objectServiceMetrics) IncRangeReqCounter(success bool) {
- m.rangeCounter.Inc(success)
-}
-
-func (m objectServiceMetrics) IncRangeHashReqCounter(success bool) {
- m.rangeHashCounter.Inc(success)
-}
-
-func (m objectServiceMetrics) AddGetReqDuration(d time.Duration) {
- m.getDuration.Add(float64(d))
-}
-
-func (m objectServiceMetrics) AddPutReqDuration(d time.Duration) {
- m.putDuration.Add(float64(d))
-}
-
-func (m objectServiceMetrics) AddHeadReqDuration(d time.Duration) {
- m.headDuration.Add(float64(d))
-}
-
-func (m objectServiceMetrics) AddSearchReqDuration(d time.Duration) {
- m.searchDuration.Add(float64(d))
-}
-
-func (m objectServiceMetrics) AddDeleteReqDuration(d time.Duration) {
- m.deleteDuration.Add(float64(d))
-}
-
-func (m objectServiceMetrics) AddRangeReqDuration(d time.Duration) {
- m.rangeDuration.Add(float64(d))
-}
-
-func (m objectServiceMetrics) AddRangeHashReqDuration(d time.Duration) {
- m.rangeHashDuration.Add(float64(d))
-}
-
-func (m objectServiceMetrics) AddPutPayload(ln int) {
- m.putPayload.Add(float64(ln))
-}
-
-func (m objectServiceMetrics) AddGetPayload(ln int) {
- m.getPayload.Add(float64(ln))
-}
-
-func (m objectServiceMetrics) AddToObjectCounter(shardID, objectType string, delta int) {
- m.shardMetrics.With(
- prometheus.Labels{
- shardIDLabelKey: shardID,
- counterTypeLabelKey: objectType,
- },
- ).Add(float64(delta))
-}
-
-func (m objectServiceMetrics) SetObjectCounter(shardID, objectType string, v uint64) {
- m.shardMetrics.With(
- prometheus.Labels{
- shardIDLabelKey: shardID,
- counterTypeLabelKey: objectType,
- },
- ).Set(float64(v))
-}
-
-func (m objectServiceMetrics) SetReadonly(shardID string, readonly bool) {
- var flag float64
- if readonly {
- flag = 1
- }
- m.shardsReadonly.With(
- prometheus.Labels{
- shardIDLabelKey: shardID,
- },
- ).Set(flag)
-}
diff --git a/pkg/metrics/state.go b/pkg/metrics/state.go
deleted file mode 100644
index 94e28af389..0000000000
--- a/pkg/metrics/state.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-const stateSubsystem = "state"
-
-type stateMetrics struct {
- healthCheck prometheus.Gauge
-}
-
-func newStateMetrics() stateMetrics {
- return stateMetrics{
- healthCheck: prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: stateSubsystem,
- Name: "health",
- Help: "Current Node state",
- }),
- }
-}
-
-func (m stateMetrics) register() {
- prometheus.MustRegister(m.healthCheck)
-}
-
-func (m stateMetrics) SetHealth(s int32) {
- m.healthCheck.Set(float64(s))
-}
diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go
new file mode 100644
index 0000000000..2849f3052b
--- /dev/null
+++ b/pkg/morph/client/actor.go
@@ -0,0 +1,144 @@
+package client
+
+import (
+ "github.com/google/uuid"
+ "github.com/nspcc-dev/neo-go/pkg/config/netmode"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+type actorProvider interface {
+ GetActor() *actor.Actor
+ GetRPCActor() actor.RPCActor
+}
+
+// SwitchRPCGuardedActor guards access to the RPC actor held by Client.
+// Client switches an established neo-go connection if it is broken, which
+// invalidates the RPC actor within Client. Components initialized with that
+// actor may then unintentionally keep using it after invalidation.
+// SwitchRPCGuardedActor prevents this by always getting the current RPC
+// actor from Client.
+type SwitchRPCGuardedActor struct {
+ actorProvider actorProvider
+}
+
+func NewSwitchRPCGuardedActor(c *Client) *SwitchRPCGuardedActor {
+ return &SwitchRPCGuardedActor{
+ actorProvider: c,
+ }
+}
+
+func (a *SwitchRPCGuardedActor) Call(contract util.Uint160, operation string, params ...any) (*result.Invoke, error) {
+ return a.actorProvider.GetActor().Call(contract, operation, params...)
+}
+
+func (a *SwitchRPCGuardedActor) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) {
+ return a.actorProvider.GetActor().CalculateNetworkFee(tx)
+}
+
+func (a *SwitchRPCGuardedActor) CalculateValidUntilBlock() (uint32, error) {
+ return a.actorProvider.GetActor().CalculateValidUntilBlock()
+}
+
+func (a *SwitchRPCGuardedActor) GetBlockCount() (uint32, error) {
+ return a.actorProvider.GetActor().GetBlockCount()
+}
+
+func (a *SwitchRPCGuardedActor) GetNetwork() netmode.Magic {
+ return a.actorProvider.GetActor().GetNetwork()
+}
+
+func (a *SwitchRPCGuardedActor) GetVersion() result.Version {
+ return a.actorProvider.GetActor().GetVersion()
+}
+
+func (a *SwitchRPCGuardedActor) MakeCall(contract util.Uint160, method string, params ...any) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeCall(contract, method, params...)
+}
+
+func (a *SwitchRPCGuardedActor) MakeRun(script []byte) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeRun(script)
+}
+
+func (a *SwitchRPCGuardedActor) MakeTunedCall(contract util.Uint160, method string, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier, params ...any) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeTunedCall(contract, method, attrs, txHook, params...)
+}
+
+func (a *SwitchRPCGuardedActor) MakeTunedRun(script []byte, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeTunedRun(script, attrs, txHook)
+}
+
+func (a *SwitchRPCGuardedActor) MakeUncheckedRun(script []byte, sysfee int64, attrs []transaction.Attribute, txHook actor.TransactionModifier) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeUncheckedRun(script, sysfee, attrs, txHook)
+}
+
+func (a *SwitchRPCGuardedActor) MakeUnsignedCall(contract util.Uint160, method string, attrs []transaction.Attribute, params ...any) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeUnsignedCall(contract, method, attrs, params...)
+}
+
+func (a *SwitchRPCGuardedActor) MakeUnsignedRun(script []byte, attrs []transaction.Attribute) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeUnsignedRun(script, attrs)
+}
+
+func (a *SwitchRPCGuardedActor) MakeUnsignedUncheckedRun(script []byte, sysFee int64, attrs []transaction.Attribute) (*transaction.Transaction, error) {
+ return a.actorProvider.GetActor().MakeUnsignedUncheckedRun(script, sysFee, attrs)
+}
+
+func (a *SwitchRPCGuardedActor) Send(tx *transaction.Transaction) (util.Uint256, uint32, error) {
+ return a.actorProvider.GetActor().Send(tx)
+}
+
+func (a *SwitchRPCGuardedActor) SendCall(contract util.Uint160, method string, params ...any) (util.Uint256, uint32, error) {
+ return a.actorProvider.GetActor().SendCall(contract, method, params...)
+}
+
+func (a *SwitchRPCGuardedActor) SendRun(script []byte) (util.Uint256, uint32, error) {
+ return a.actorProvider.GetActor().SendRun(script)
+}
+
+func (a *SwitchRPCGuardedActor) SendTunedCall(contract util.Uint160, method string, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier, params ...any) (util.Uint256, uint32, error) {
+ return a.actorProvider.GetActor().SendTunedCall(contract, method, attrs, txHook, params...)
+}
+
+func (a *SwitchRPCGuardedActor) SendTunedRun(script []byte, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier) (util.Uint256, uint32, error) {
+ return a.actorProvider.GetActor().SendTunedRun(script, attrs, txHook)
+}
+
+func (a *SwitchRPCGuardedActor) SendUncheckedRun(script []byte, sysfee int64, attrs []transaction.Attribute, txHook actor.TransactionModifier) (util.Uint256, uint32, error) {
+ return a.actorProvider.GetActor().SendUncheckedRun(script, sysfee, attrs, txHook)
+}
+
+func (a *SwitchRPCGuardedActor) Sender() util.Uint160 {
+ return a.actorProvider.GetActor().Sender()
+}
+
+func (a *SwitchRPCGuardedActor) Sign(tx *transaction.Transaction) error {
+ return a.actorProvider.GetActor().Sign(tx)
+}
+
+func (a *SwitchRPCGuardedActor) SignAndSend(tx *transaction.Transaction) (util.Uint256, uint32, error) {
+ return a.actorProvider.GetActor().SignAndSend(tx)
+}
+
+func (a *SwitchRPCGuardedActor) CallAndExpandIterator(contract util.Uint160, method string, maxItems int, params ...any) (*result.Invoke, error) {
+ return a.actorProvider.GetActor().CallAndExpandIterator(contract, method, maxItems, params...)
+}
+
+func (a *SwitchRPCGuardedActor) TerminateSession(sessionID uuid.UUID) error {
+ return a.actorProvider.GetActor().TerminateSession(sessionID)
+}
+
+func (a *SwitchRPCGuardedActor) TraverseIterator(sessionID uuid.UUID, iterator *result.Iterator, num int) ([]stackitem.Item, error) {
+ return a.actorProvider.GetActor().TraverseIterator(sessionID, iterator, num)
+}
+
+func (a *SwitchRPCGuardedActor) GetRPCActor() actor.RPCActor {
+ return a.actorProvider.GetRPCActor()
+}
+
+func (a *SwitchRPCGuardedActor) GetRPCInvoker() invoker.RPCInvoke {
+ return a.actorProvider.GetRPCActor()
+}
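
Every method above re-resolves the actor through GetActor() instead of caching it, so a connection switch inside Client is observed on the very next call. The essence of that guard, reduced to a toy provider:

```go
package main

import (
	"fmt"
	"sync"
)

type provider struct {
	mtx      sync.RWMutex
	delegate func() string
}

func (p *provider) Get() func() string {
	p.mtx.RLock()
	defer p.mtx.RUnlock()
	return p.delegate
}

func (p *provider) Swap(d func() string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	p.delegate = d
}

// guarded never caches the delegate: it re-resolves it on every call,
// so a swap is observed by the very next invocation.
type guarded struct{ p *provider }

func (g *guarded) Call() string { return g.p.Get()() }

func main() {
	p := &provider{delegate: func() string { return "conn-1" }}
	g := &guarded{p: p}
	fmt.Println(g.Call()) // conn-1
	p.Swap(func() string { return "conn-2" })
	fmt.Println(g.Call()) // conn-2: the switch is picked up transparently
}
```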
diff --git a/pkg/morph/client/audit/client.go b/pkg/morph/client/audit/client.go
deleted file mode 100644
index b922fc7925..0000000000
--- a/pkg/morph/client/audit/client.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package audit
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// Client is a wrapper over StaticClient
-// which makes calls with the names and arguments
-// of the FrostFS Audit contract.
-//
-// Working client must be created via constructor New.
-// Using the Client that has been created with new(Client)
-// expression (or just declaring a Client variable) is unsafe
-// and can lead to panic.
-type Client struct {
- client *client.StaticClient // static Audit contract client
-}
-
-const (
- putResultMethod = "put"
- getResultMethod = "get"
- listResultsMethod = "list"
- listByEpochResultsMethod = "listByEpoch"
- listByCIDResultsMethod = "listByCID"
- listByNodeResultsMethod = "listByNode"
-)
-
-// NewFromMorph returns the wrapper instance from the raw morph client.
-func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...client.StaticClientOption) (*Client, error) {
- sc, err := client.NewStatic(cli, contract, fee, opts...)
- if err != nil {
- return nil, fmt.Errorf("could not create static client of audit contract: %w", err)
- }
-
- return &Client{client: sc}, nil
-}
diff --git a/pkg/morph/client/audit/get_result.go b/pkg/morph/client/audit/get_result.go
deleted file mode 100644
index 9e1b75db45..0000000000
--- a/pkg/morph/client/audit/get_result.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package audit
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
-)
-
-// GetAuditResult returns audit result structure stored in audit contract.
-func (c *Client) GetAuditResult(id ResultID) (*auditAPI.Result, error) {
- prm := client.TestInvokePrm{}
- prm.SetMethod(getResultMethod)
- prm.SetArgs([]byte(id))
-
- prms, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getResultMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", getResultMethod, ln)
- }
-
- value, err := client.BytesFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", getResultMethod, err)
- }
-
- var auditRes auditAPI.Result
- if err := auditRes.Unmarshal(value); err != nil {
- return nil, fmt.Errorf("could not unmarshal audit result structure: %w", err)
- }
-
- return &auditRes, nil
-}
diff --git a/pkg/morph/client/audit/list_results.go b/pkg/morph/client/audit/list_results.go
deleted file mode 100644
index ace01d15ba..0000000000
--- a/pkg/morph/client/audit/list_results.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package audit
-
-import (
- "crypto/sha256"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-// ListAllAuditResultID returns a list of all audit result IDs inside audit contract.
-func (c *Client) ListAllAuditResultID() ([]ResultID, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(listResultsMethod)
-
- items, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listResultsMethod, err)
- }
- return parseAuditResults(items, listResultsMethod)
-}
-
-// ListAuditResultIDByEpoch returns a list of audit result IDs inside audit
-// contract for specific epoch number.
-func (c *Client) ListAuditResultIDByEpoch(epoch uint64) ([]ResultID, error) {
- prm := client.TestInvokePrm{}
- prm.SetMethod(listByEpochResultsMethod)
- prm.SetArgs(epoch)
-
- items, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByEpochResultsMethod, err)
- }
- return parseAuditResults(items, listByEpochResultsMethod)
-}
-
-// ListAuditResultIDByCID returns a list of audit result IDs inside audit
-// contract for specific epoch number and container ID.
-func (c *Client) ListAuditResultIDByCID(epoch uint64, cnr cid.ID) ([]ResultID, error) {
- binCnr := make([]byte, sha256.Size)
- cnr.Encode(binCnr)
-
- prm := client.TestInvokePrm{}
- prm.SetMethod(listByCIDResultsMethod)
- prm.SetArgs(epoch, binCnr)
-
- items, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByCIDResultsMethod, err)
- }
- return parseAuditResults(items, listByCIDResultsMethod)
-}
-
-// ListAuditResultIDByNode returns a list of audit result IDs inside audit
-// contract for specific epoch number, container ID and inner ring public key.
-func (c *Client) ListAuditResultIDByNode(epoch uint64, cnr cid.ID, nodeKey []byte) ([]ResultID, error) {
- binCnr := make([]byte, sha256.Size)
- cnr.Encode(binCnr)
-
- prm := client.TestInvokePrm{}
- prm.SetMethod(listByNodeResultsMethod)
- prm.SetArgs(epoch, binCnr, nodeKey)
-
- items, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByNodeResultsMethod, err)
- }
- return parseAuditResults(items, listByNodeResultsMethod)
-}
-
-func parseAuditResults(items []stackitem.Item, method string) ([]ResultID, error) {
- if ln := len(items); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", method, ln)
- }
-
- items, err := client.ArrayFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
- }
-
- res := make([]ResultID, 0, len(items))
- for i := range items {
- rawRes, err := client.BytesFromStackItem(items[i])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", method, err)
- }
-
- res = append(res, rawRes)
- }
-
- return res, nil
-}
diff --git a/pkg/morph/client/audit/put_result.go b/pkg/morph/client/audit/put_result.go
deleted file mode 100644
index f8e233b263..0000000000
--- a/pkg/morph/client/audit/put_result.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package audit
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
-)
-
-// ResultID is an identity of audit result inside audit contract.
-type ResultID []byte
-
-// PutPrm groups parameters of PutAuditResult operation.
-type PutPrm struct {
- result *auditAPI.Result
-
- client.InvokePrmOptional
-}
-
-// SetResult sets audit result.
-func (p *PutPrm) SetResult(result *auditAPI.Result) {
- p.result = result
-}
-
-// PutAuditResult saves passed audit result structure in FrostFS system
-// through Audit contract call.
-//
-// Returns encountered error that caused the saving to interrupt.
-func (c *Client) PutAuditResult(p PutPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(putResultMethod)
- prm.SetArgs(p.result.Marshal())
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- err := c.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", putResultMethod, err)
- }
- return nil
-}
diff --git a/pkg/morph/client/audit/result_test.go b/pkg/morph/client/audit/result_test.go
deleted file mode 100644
index a0137e3b83..0000000000
--- a/pkg/morph/client/audit/result_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package audit
-
-import (
- "testing"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/stretchr/testify/require"
-)
-
-func TestAuditResults(t *testing.T) {
- t.Skip()
- const epoch = 11
-
- endpoint := "http://morph_chain.frostfs.devenv:30333"
- sAuditHash := "cdfb3dab86e6d60e8a143d9e2ecb0b188f3dc2eb"
- irKeyWIF := "L3o221BojgcCPYgdbXsm6jn7ayTZ72xwREvBHXKknR8VJ3G4WmjB"
-
- key, err := keys.NewPrivateKeyFromWIF(irKeyWIF)
- require.NoError(t, err)
-
- auditHash, err := util.Uint160DecodeStringLE(sAuditHash)
- require.NoError(t, err)
-
- morphClient, err := client.New(key, client.WithEndpoints(client.Endpoint{Address: endpoint}))
- require.NoError(t, err)
-
- auditClientWrapper, err := NewFromMorph(morphClient, auditHash, 0)
- require.NoError(t, err)
-
- id := cidtest.ID()
-
- var auditRes auditAPI.Result
- auditRes.ForEpoch(epoch)
- auditRes.SetAuditorKey(key.PublicKey().Bytes())
- auditRes.ForContainer(id)
-
- prm := PutPrm{}
- prm.SetResult(&auditRes)
-
- require.NoError(t, auditClientWrapper.PutAuditResult(prm))
-
- time.Sleep(5 * time.Second)
-
- list, err := auditClientWrapper.ListAuditResultIDByCID(epoch, id)
- require.NoError(t, err)
- require.Len(t, list, 1)
-
- savedAuditRes, err := auditClientWrapper.GetAuditResult(list[0])
- require.NoError(t, err)
-
- require.Equal(t, auditRes, savedAuditRes)
-}
diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go
index aae245acd2..4462daab4a 100644
--- a/pkg/morph/client/balance/balanceOf.go
+++ b/pkg/morph/client/balance/balanceOf.go
@@ -1,36 +1,33 @@
package balance
import (
+ "context"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// BalanceOf receives the amount of funds in the client's account
// through the Balance contract call, and returns it.
-func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {
- h, err := address.StringToUint160(id.EncodeToString())
- if err != nil {
- return nil, err
- }
+func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) {
+ h := id.ScriptHash()
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(balanceOfMethod)
invokePrm.SetArgs(h)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err)
} else if ln := len(prms); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln)
}
amount, err := client.BigIntFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
}
return amount, nil
}
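
A minimal usage sketch of the context-aware read path above; the package, helper name, and timeout are illustrative and not part of the patch:

```go
package balanceexample

import (
	"context"
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// printBalance shows the new signatures: both reads take a context,
// so a caller-side timeout now bounds the underlying test invocations.
func printBalance(cli *balance.Client, owner user.ID) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	amount, err := cli.BalanceOf(ctx, owner) // *big.Int, GASe-12 units
	if err != nil {
		return fmt.Errorf("balance of: %w", err)
	}
	prec, err := cli.Decimals(ctx)
	if err != nil {
		return fmt.Errorf("decimals: %w", err)
	}
	fmt.Printf("balance: %s (precision %d)\n", amount, prec)
	return nil
}
```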
diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go
index 88a1aa8f11..f4685b0ab6 100644
--- a/pkg/morph/client/balance/burn.go
+++ b/pkg/morph/client/balance/burn.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,11 +32,12 @@ func (b *BurnPrm) SetID(id []byte) {
}
// Burn destroys funds from the account.
-func (c *Client) Burn(p BurnPrm) error {
+func (c *Client) Burn(ctx context.Context, p BurnPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(burnMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
+ return err
}
diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go
index b05c526dc7..1dacb9574c 100644
--- a/pkg/morph/client/balance/client.go
+++ b/pkg/morph/client/balance/client.go
@@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("could not create static client of Balance contract: %w", err)
+ return nil, fmt.Errorf("create 'balance' contract client: %w", err)
}
return &Client{
@@ -54,15 +54,7 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go
index 39e4b28e57..57e61d62be 100644
--- a/pkg/morph/client/balance/decimals.go
+++ b/pkg/morph/client/balance/decimals.go
@@ -1,6 +1,7 @@
package balance
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,20 +9,20 @@ import (
// Decimals returns the decimal precision of currency transactions
// made through the Balance contract call.
-func (c *Client) Decimals() (uint32, error) {
+func (c *Client) Decimals(ctx context.Context) (uint32, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(decimalsMethod)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err)
} else if ln := len(prms); ln != 1 {
return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln)
}
decimals, err := client.IntFromStackItem(prms[0])
if err != nil {
- return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
}
return uint32(decimals), nil
}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
index 7b270808e1..83e8b05868 100644
--- a/pkg/morph/client/balance/lock.go
+++ b/pkg/morph/client/balance/lock.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -42,11 +44,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
}
// Lock locks fund on the user account.
-func (c *Client) Lock(p LockPrm) error {
+func (c *Client) Lock(ctx context.Context, p LockPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(lockMethod)
prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
+ return err
}
diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go
index cda78c0772..082ade85e5 100644
--- a/pkg/morph/client/balance/mint.go
+++ b/pkg/morph/client/balance/mint.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,11 +32,12 @@ func (m *MintPrm) SetID(id []byte) {
}
// Mint sends funds to the account.
-func (c *Client) Mint(p MintPrm) error {
+func (c *Client) Mint(ctx context.Context, p MintPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(mintMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
+ return err
}
diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go
index 5206f69fa3..870bed1661 100644
--- a/pkg/morph/client/balance/transfer.go
+++ b/pkg/morph/client/balance/transfer.go
@@ -1,11 +1,11 @@
package balance
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// TransferPrm groups parameters of TransferX method.
@@ -21,27 +21,18 @@ type TransferPrm struct {
// TransferX transfers p.Amount of GASe-12 from p.From to p.To
// with details p.Details through direct smart contract call.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) TransferX(p TransferPrm) error {
- from, err := address.StringToUint160(p.From.EncodeToString())
- if err != nil {
- return err
- }
-
- to, err := address.StringToUint160(p.To.EncodeToString())
- if err != nil {
- return err
- }
+func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
+ from := p.From.ScriptHash()
+ to := p.To.ScriptHash()
prm := client.InvokePrm{}
prm.SetMethod(transferXMethod)
prm.SetArgs(from, to, p.Amount, p.Details)
prm.InvokePrmOptional = p.InvokePrmOptional
- err = c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
+ return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
}
return nil
}
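
The TransferX and BalanceOf rewrites lean on the fact that a user.ID already wraps a script hash. A sketch of the equivalence the patch relies on (helper name is illustrative):

```go
package balanceexample

import (
	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/util"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// scriptHash demonstrates why the fallible decode step could be dropped:
// EncodeToString renders the ID's script hash as a Neo address, and
// StringToUint160 merely decodes it back, so ScriptHash() yields the
// same value directly and without an error path.
func scriptHash(id user.ID) util.Uint160 {
	h := id.ScriptHash()                                   // new, infallible path
	old, _ := address.StringToUint160(id.EncodeToString()) // old round-trip
	if h != old {
		panic("unreachable: both forms carry the same hash")
	}
	return h
}
```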
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index 51a030e63d..aab058d277 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -6,26 +6,34 @@ import (
"fmt"
"math/big"
"sync"
+ "sync/atomic"
"time"
+ nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "github.com/google/uuid"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt"
- sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"github.com/nspcc-dev/neo-go/pkg/wallet"
- "go.uber.org/atomic"
"go.uber.org/zap"
)
@@ -46,18 +54,20 @@ import (
type Client struct {
cache cache
- logger *logger.Logger // logging component
+ logger *logger.Logger // logging component
+ metrics morphmetrics.Register
client *rpcclient.WSClient // neo-go websocket client
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
+ nnsHash util.Uint160 // NNS contract hash
+
+ nnsReader *nnsClient.ContractReader // NNS contract wrapper
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
- signer *transaction.Signer
-
notary *notaryInfo
cfg cfg
@@ -67,18 +77,12 @@ type Client struct {
// switchLock protects endpoints, inactive, and subscription-related fields.
// It is taken exclusively during endpoint switch and locked in shared mode
// on every normal call.
- switchLock *sync.RWMutex
-
- // channel for ws notifications
- notifications chan rpcclient.Notification
+ switchLock sync.RWMutex
// channel for internal stop
closeChan chan struct{}
-
- // cached subscription information
- subscribedEvents map[util.Uint160]string
- subscribedNotaryEvents map[util.Uint160]string
- subscribedToNewBlocks bool
+ closed atomic.Bool
+ wg sync.WaitGroup
// indicates that Client is not able to
// establish connection to any of the
@@ -92,28 +96,15 @@ type Client struct {
}
type cache struct {
- m *sync.RWMutex
+ m sync.RWMutex
- nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
+
+ metrics metrics.MorphCacheMetrics
}
-func (c cache) nns() *util.Uint160 {
- c.m.RLock()
- defer c.m.RUnlock()
-
- return c.nnsHash
-}
-
-func (c *cache) setNNSHash(nnsHash util.Uint160) {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.nnsHash = &nnsHash
-}
-
-func (c cache) groupKey() *keys.PublicKey {
+func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
defer c.m.RUnlock()
@@ -131,7 +122,6 @@ func (c *cache) invalidate() {
c.m.Lock()
defer c.m.Unlock()
- c.nnsHash = nil
c.gKey = nil
c.txHeights.Purge()
}
@@ -161,25 +151,53 @@ func (e *notHaltStateError) Error() string {
)
}
-var errEmptyInvocationScript = errors.New("got empty invocation script from neo node")
-
-// implementation of error interface for FrostFS-specific errors.
-type frostfsError struct {
- err error
-}
-
-func (e frostfsError) Error() string {
- return fmt.Sprintf("frostfs error: %v", e.err)
-}
-
-// wraps FrostFS-specific error into frostfsError. Arg must not be nil.
-func wrapFrostFSError(err error) error {
- return frostfsError{err}
-}
-
// Invoke invokes contract method by sending transaction into blockchain.
+// Returns valid until block value.
// Supported args types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error {
+func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) {
+ start := time.Now()
+ success := false
+ defer func() {
+ c.metrics.ObserveInvoke("Invoke", contract.String(), method, success, time.Since(start))
+ }()
+
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return InvokeRes{}, ErrConnectionLost
+ }
+
+ txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
+ if err != nil {
+ return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
+ }
+
+ c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
+ zap.String("method", method),
+ zap.Uint32("vub", vub),
+ zap.Stringer("tx_hash", txHash.Reverse()))
+
+ success = true
+ return InvokeRes{Hash: txHash, VUB: vub}, nil
+}
+
+// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
+// If cb returns an error, the session is closed and this error is returned as-is.
+// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
+// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
+// The default batchSize is 100, the default limit from neo-go.
+func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
+ start := time.Now()
+ success := false
+ defer func() {
+ c.metrics.ObserveInvoke("TestInvokeIterator", contract.String(), method, success, time.Since(start))
+ }()
+
+ if batchSize <= 0 {
+ batchSize = invoker.DefaultIteratorResultItems
+ }
+
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -187,22 +205,66 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
return ErrConnectionLost
}
- txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
+ script, err := smartcontract.CreateCallAndPrefetchIteratorScript(contract, method, batchSize, args...)
if err != nil {
- return fmt.Errorf("could not invoke %s: %w", method, err)
+ return err
}
- c.logger.Debug("neo client invoke",
- zap.String("method", method),
- zap.Uint32("vub", vub),
- zap.Stringer("tx_hash", txHash.Reverse()))
+ val, err := c.rpcActor.Run(script)
+ if err != nil {
+ return err
+ } else if val.State != HaltState {
+ return &notHaltStateError{state: val.State, exception: val.FaultException}
+ }
+ arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
+ if err != nil {
+ return err
+ }
+ for i := range arr {
+ if err := cb(arr[i]); err != nil {
+ return err
+ }
+ }
+ if (sid == uuid.UUID{}) {
+ success = true
+ return nil
+ }
+
+ defer func() {
+ _ = c.rpcActor.TerminateSession(sid)
+ }()
+
+ // Batch size for TraverseIterator() can be restricted on the server side.
+ traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
+ for {
+ items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
+ if err != nil {
+ return err
+ }
+
+ for i := range items {
+ if err := cb(items[i]); err != nil {
+ return err
+ }
+ }
+ if len(items) < traverseBatchSize {
+ break
+ }
+ }
+ success = true
return nil
}
// TestInvoke invokes contract method locally in neo-go node. This method should
// be used to read data from smart-contract.
func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (res []stackitem.Item, err error) {
+ start := time.Now()
+ success := false
+ defer func() {
+ c.metrics.ObserveInvoke("TestInvoke", contract.String(), method, success, time.Since(start))
+ }()
+
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -216,9 +278,10 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (
}
if val.State != HaltState {
- return nil, wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException})
+ return nil, &notHaltStateError{state: val.State, exception: val.FaultException}
}
+ success = true
return val.Stack, nil
}
@@ -236,7 +299,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
- c.logger.Debug("native gas transfer invoke",
+ c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -270,7 +333,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
- c.logger.Debug("batch gas transfer invoke",
+ c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -297,22 +360,22 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error("can't get blockchain height",
- zap.String("error", err.Error()))
+ c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
+ zap.Error(err))
return nil
}
for {
select {
case <-ctx.Done():
- return nil
+ return ctx.Err()
default:
}
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error("can't get blockchain height",
- zap.String("error", err.Error()))
+ c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
+ zap.Error(err))
return nil
}
@@ -370,6 +433,28 @@ func (c *Client) TxHalt(h util.Uint256) (res bool, err error) {
return len(aer.Executions) > 0 && aer.Executions[0].VMState.HasFlag(vmstate.Halt), nil
}
+func (c *Client) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return nil, ErrConnectionLost
+ }
+
+ return c.client.GetApplicationLog(hash, trig)
+}
+
+func (c *Client) GetVersion() (*result.Version, error) {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return nil, ErrConnectionLost
+ }
+
+ return c.client.GetVersion()
+}
+
// TxHeight returns true if transaction has been successfully executed and persisted.
func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
c.switchLock.RLock()
@@ -385,7 +470,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
// NeoFSAlphabetList returns keys that stored in NeoFS Alphabet role. Main chain
// stores alphabet node keys of inner ring there, however the sidechain stores both
// alphabet and non alphabet node keys of inner ring.
-func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
+func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -395,7 +480,7 @@ func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
list, err := c.roleList(noderoles.NeoFSAlphabet)
if err != nil {
- return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
+ return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
}
return list, nil
@@ -409,70 +494,12 @@ func (c *Client) GetDesignateHash() util.Uint160 {
func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
height, err := c.rpcActor.GetBlockCount()
if err != nil {
- return nil, fmt.Errorf("can't get chain height: %w", err)
+ return nil, fmt.Errorf("get chain height: %w", err)
}
return c.rolemgmt.GetDesignatedByRole(r, height)
}
-// tries to resolve sc.Parameter from the arg.
-//
-// Wraps any error to frostfsError.
-func toStackParameter(value any) (sc.Parameter, error) {
- var result = sc.Parameter{
- Value: value,
- }
-
- switch v := value.(type) {
- case []byte:
- result.Type = sc.ByteArrayType
- case int:
- result.Type = sc.IntegerType
- result.Value = big.NewInt(int64(v))
- case int64:
- result.Type = sc.IntegerType
- result.Value = big.NewInt(v)
- case uint64:
- result.Type = sc.IntegerType
- result.Value = new(big.Int).SetUint64(v)
- case [][]byte:
- arr := make([]sc.Parameter, 0, len(v))
- for i := range v {
- elem, err := toStackParameter(v[i])
- if err != nil {
- return result, err
- }
-
- arr = append(arr, elem)
- }
-
- result.Type = sc.ArrayType
- result.Value = arr
- case string:
- result.Type = sc.StringType
- case util.Uint160:
- result.Type = sc.ByteArrayType
- result.Value = v.BytesBE()
- case noderoles.Role:
- result.Type = sc.IntegerType
- result.Value = big.NewInt(int64(v))
- case keys.PublicKeys:
- arr := make([][]byte, 0, len(v))
- for i := range v {
- arr = append(arr, v[i].Bytes())
- }
-
- return toStackParameter(arr)
- case bool:
- result.Type = sc.BoolType
- result.Value = v
- default:
- return result, wrapFrostFSError(fmt.Errorf("chain/client: unsupported parameter %v", value))
- }
-
- return result, nil
-}
-
// MagicNumber returns the magic number of the network
// to which the underlying RPC node client is connected.
func (c *Client) MagicNumber() (uint64, error) {
@@ -514,7 +541,7 @@ func (c *Client) MsPerBlock() (res int64, err error) {
}
// IsValidScript returns true if invocation script executes with HALT state.
-func (c *Client) IsValidScript(script []byte, signers []transaction.Signer) (res bool, err error) {
+func (c *Client) IsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -522,40 +549,35 @@ func (c *Client) IsValidScript(script []byte, signers []transaction.Signer) (res
return false, ErrConnectionLost
}
- result, err := c.client.InvokeScript(script, signers)
+ res, err := c.client.InvokeScript(script, signers)
if err != nil {
return false, fmt.Errorf("invokeScript: %w", err)
}
- return result.State == vmstate.Halt.String(), nil
+ return res.State == vmstate.Halt.String(), nil
}
-// NotificationChannel returns channel than receives subscribed
-// notification from the connected RPC node.
-// Channel is closed when connection to the RPC node has been
-// lost without the possibility of recovery.
-func (c *Client) NotificationChannel() <-chan rpcclient.Notification {
- return c.notifications
-}
-
-// inactiveMode switches Client to an inactive mode:
-// - notification channel is closed;
-// - all the new RPC request would return ErrConnectionLost;
-// - inactiveModeCb is called if not nil.
-func (c *Client) inactiveMode() {
- c.switchLock.Lock()
- defer c.switchLock.Unlock()
-
- close(c.notifications)
- c.inactive = true
-
- if c.cfg.inactiveModeCb != nil {
- c.cfg.inactiveModeCb()
- }
+func (c *Client) Metrics() morphmetrics.Register {
+ return c.metrics
}
func (c *Client) setActor(act *actor.Actor) {
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
+ c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
+}
+
+func (c *Client) GetActor() *actor.Actor {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ return c.rpcActor
+}
+
+func (c *Client) GetRPCActor() actor.RPCActor {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ return c.client
}
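
A sketch of driving the new TestInvokeIterator from a contract wrapper; the contract hash, method name, and batch size are placeholders:

```go
package morphexample

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

// collectItems gathers every element produced by an iterator-returning
// contract method. The callback first sees the prefetched batch; if the
// result did not fit, it then sees items traversed via the server-side
// session, which the client terminates when iteration finishes.
func collectItems(c *client.Client, contract util.Uint160) ([]stackitem.Item, error) {
	var out []stackitem.Item
	err := c.TestInvokeIterator(func(it stackitem.Item) error {
		out = append(out, it)
		return nil // a non-nil error here closes the session early
	}, 512, contract, "listItems")
	if err != nil {
		return nil, fmt.Errorf("iterate: %w", err)
	}
	return out, nil
}
```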
diff --git a/pkg/morph/client/client_test.go b/pkg/morph/client/client_test.go
deleted file mode 100644
index a448c2cf4a..0000000000
--- a/pkg/morph/client/client_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package client
-
-import (
- "math/big"
- "testing"
-
- sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
- "github.com/stretchr/testify/require"
-)
-
-func TestToStackParameter(t *testing.T) {
- items := []struct {
- value any
- expType sc.ParamType
- expVal any
- }{
- {
- value: []byte{1, 2, 3},
- expType: sc.ByteArrayType,
- },
- {
- value: int64(100),
- expType: sc.IntegerType,
- expVal: big.NewInt(100),
- },
- {
- value: uint64(100),
- expType: sc.IntegerType,
- expVal: big.NewInt(100),
- },
- {
- value: "hello world",
- expType: sc.StringType,
- },
- {
- value: false,
- expType: sc.BoolType,
- },
- {
- value: true,
- expType: sc.BoolType,
- },
- }
-
- for _, item := range items {
- t.Run(item.expType.String()+" to stack parameter", func(t *testing.T) {
- res, err := toStackParameter(item.value)
- require.NoError(t, err)
- require.Equal(t, item.expType, res.Type)
- if item.expVal != nil {
- require.Equal(t, item.expVal, res.Value)
- } else {
- require.Equal(t, item.value, res.Value)
- }
- })
- }
-}
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index 01fd30f70a..e4dcd0db71 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -4,9 +4,12 @@ import (
"context"
"errors"
"fmt"
- "sync"
+ "net"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -27,23 +30,25 @@ type Callback func()
// groups the configurations with default values.
type cfg struct {
- ctx context.Context // neo-go client context
-
dialTimeout time.Duration // client dial timeout
logger *logger.Logger // logging component
+ metrics morphmetrics.Register
+
waitInterval time.Duration
signer *transaction.Signer
endpoints []Endpoint
- singleCli *rpcclient.WSClient // neo-go client for single client mode
-
inactiveModeCb Callback
switchInterval time.Duration
+
+ morphCacheMetrics metrics.MorphCacheMetrics
+
+ dialerSource DialerSource
}
const (
@@ -51,15 +56,19 @@ const (
defaultWaitInterval = 500 * time.Millisecond
)
+var ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
+
func defaultConfig() *cfg {
return &cfg{
- ctx: context.Background(),
dialTimeout: defaultDialTimeout,
- logger: &logger.Logger{Logger: zap.L()},
+ logger: logger.NewLoggerWrapper(zap.L()),
+ metrics: morphmetrics.NoopRegister{},
waitInterval: defaultWaitInterval,
signer: &transaction.Signer{
Scopes: transaction.Global,
},
+ morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{},
+ dialerSource: &noopDialerSource{},
}
}
@@ -76,11 +85,14 @@ func defaultConfig() *cfg {
// - signer with the global scope;
// - wait interval: 500ms;
// - logger: &logger.Logger{Logger: zap.L()}.
+// - metrics: morphmetrics.NoopRegister.
//
// If desired option satisfies the default value, it can be omitted.
// If multiple options of the same config value are supplied,
// the option with the highest index in the arguments will be used.
-func New(key *keys.PrivateKey, opts ...Option) (*Client, error) {
+// If a list of endpoints is provided, the first healthy one is used.
+// If there is no healthy endpoint, ErrNoHealthyEndpoint is returned.
+func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, error) {
if key == nil {
panic("empty private key")
}
@@ -101,53 +113,61 @@ func New(key *keys.PrivateKey, opts ...Option) (*Client, error) {
}
cli := &Client{
- cache: newClientCache(),
- logger: cfg.logger,
- acc: acc,
- accAddr: accAddr,
- signer: cfg.signer,
- cfg: *cfg,
- switchLock: &sync.RWMutex{},
- notifications: make(chan rpcclient.Notification),
- subscribedEvents: make(map[util.Uint160]string),
- subscribedNotaryEvents: make(map[util.Uint160]string),
- closeChan: make(chan struct{}),
+ cache: newClientCache(cfg.morphCacheMetrics),
+ logger: cfg.logger,
+ metrics: cfg.metrics,
+ acc: acc,
+ accAddr: accAddr,
+ cfg: *cfg,
+ closeChan: make(chan struct{}),
}
cli.endpoints.init(cfg.endpoints)
var err error
var act *actor.Actor
- if cfg.singleCli != nil {
- // return client in single RPC node mode that uses
- // predefined WS client
- //
- // in case of the closing web socket connection:
- // if extra endpoints were provided via options,
- // they will be used in switch process, otherwise
- // inactive mode will be enabled
- cli.client = cfg.singleCli
-
- act, err = newActor(cfg.singleCli, acc, *cfg)
+ var endpoint Endpoint
+ for cli.endpoints.curr, endpoint = range cli.endpoints.list {
+ cli.client, act, err = cli.newCli(ctx, endpoint)
if err != nil {
- return nil, fmt.Errorf("could not create RPC actor: %w", err)
- }
- } else {
- cli.client, act, err = cli.newCli(cli.endpoints.list[0].Address)
- if err != nil {
- return nil, fmt.Errorf("could not create RPC client: %w", err)
+ cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
+ zap.Error(err), zap.String("endpoint", endpoint.Address))
+ } else {
+ cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint,
+ zap.String("endpoint", endpoint.Address))
+ if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
+ cli.switchIsActive.Store(true)
+ go cli.switchToMostPrioritized(ctx)
+ }
+ break
}
}
+ if cli.client == nil {
+ return nil, ErrNoHealthyEndpoint
+ }
+ cs, err := cli.client.GetContractStateByID(nnsContractID)
+ if err != nil {
+ return nil, fmt.Errorf("resolve nns hash: %w", err)
+ }
+ cli.nnsHash = cs.Hash
cli.setActor(act)
- go cli.notificationLoop()
+ go cli.closeWaiter(ctx)
return cli, nil
}
-func (c *Client) newCli(endpoint string) (*rpcclient.WSClient, *actor.Actor, error) {
- cli, err := rpcclient.NewWS(c.cfg.ctx, endpoint, rpcclient.Options{
- DialTimeout: c.cfg.dialTimeout,
+func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSClient, *actor.Actor, error) {
+ cfg, err := endpoint.MTLSConfig.parse()
+ if err != nil {
+ return nil, nil, fmt.Errorf("read mtls certificates: %w", err)
+ }
+ cli, err := rpcclient.NewWS(ctx, endpoint.Address, rpcclient.WSOptions{
+ Options: rpcclient.Options{
+ DialTimeout: c.cfg.dialTimeout,
+ TLSClientConfig: cfg,
+ NetDialContext: c.cfg.dialerSource.NetContextDialer(),
+ },
})
if err != nil {
return nil, nil, fmt.Errorf("WS client creation: %w", err)
@@ -184,26 +204,11 @@ func newActor(ws *rpcclient.WSClient, acc *wallet.Account, cfg cfg) (*actor.Acto
}})
}
-func newClientCache() cache {
+func newClientCache(morphCacheMetrics metrics.MorphCacheMetrics) cache {
c, _ := lru.New[util.Uint256, uint32](100) // returns error only if size is negative
return cache{
- m: &sync.RWMutex{},
txHeights: c,
- }
-}
-
-// WithContext returns a client constructor option that
-// specifies the neo-go client context.
-//
-// Ignores nil value. Has no effect if WithSingleClient
-// is provided.
-//
-// If option not provided, context.Background() is used.
-func WithContext(ctx context.Context) Option {
- return func(c *cfg) {
- if ctx != nil {
- c.ctx = ctx
- }
+ metrics: morphCacheMetrics,
}
}
@@ -236,6 +241,20 @@ func WithLogger(logger *logger.Logger) Option {
}
}
+// WithMetrics returns a client constructor option
+// that specifies the component for reporting metrics.
+//
+// Ignores nil value.
+//
+// If option not provided, morphmetrics.NoopRegister is used.
+func WithMetrics(metrics morphmetrics.Register) Option {
+ return func(c *cfg) {
+ if metrics != nil {
+ c.metrics = metrics
+ }
+ }
+}
+
// WithSigner returns a client constructor option
// that specifies the signer and the scope of the transaction.
//
@@ -258,17 +277,6 @@ func WithEndpoints(endpoints ...Endpoint) Option {
}
}
-// WithSingleClient returns a client constructor option
-// that specifies single neo-go client and forces Client
-// to use it for requests.
-//
-// Passed client must already be initialized.
-func WithSingleClient(cli *rpcclient.WSClient) Option {
- return func(c *cfg) {
- c.singleCli = cli
- }
-}
-
// WithConnLostCallback return a client constructor option
// that specifies a callback that is called when Client
// unsuccessfully tried to connect to all the specified
@@ -287,3 +295,25 @@ func WithSwitchInterval(i time.Duration) Option {
c.switchInterval = i
}
}
+
+func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option {
+ return func(c *cfg) {
+ c.morphCacheMetrics = morphCacheMetrics
+ }
+}
+
+type DialerSource interface {
+ NetContextDialer() func(context.Context, string, string) (net.Conn, error)
+}
+
+type noopDialerSource struct{}
+
+func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
+ return nil
+}
+
+func WithDialerSource(ds DialerSource) Option {
+ return func(c *cfg) {
+ c.dialerSource = ds
+ }
+}
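
A construction sketch for the reworked constructor: New now takes a context, tries endpoints in order, and reports ErrNoHealthyEndpoint when none answers. Addresses and the switch interval are placeholders:

```go
package morphexample

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

// dial connects to the first healthy endpoint; with a non-zero switch
// interval the client keeps probing for a higher-priority endpoint in
// the background if it had to settle for a lower one.
func dial(ctx context.Context, key *keys.PrivateKey) (*client.Client, error) {
	cli, err := client.New(ctx, key,
		client.WithEndpoints(
			client.Endpoint{Address: "ws://morph1.example:30333/ws"},
			client.Endpoint{Address: "ws://morph2.example:30333/ws"},
		),
		client.WithSwitchInterval(2*time.Minute),
	)
	if errors.Is(err, client.ErrNoHealthyEndpoint) {
		return nil, fmt.Errorf("all RPC endpoints are down: %w", err)
	}
	return cli, err
}
```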
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index f93fd6686c..be684619b9 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -22,27 +22,18 @@ type Client struct {
}
const (
- putMethod = "put"
- deleteMethod = "delete"
- getMethod = "get"
- listMethod = "list"
- eaclMethod = "eACL"
- setEACLMethod = "setEACL"
-
- startEstimationMethod = "startContainerEstimation"
- stopEstimationMethod = "stopContainerEstimation"
-
- putSizeMethod = "putContainerSize"
- listSizesMethod = "listContainerSizes"
- getSizeMethod = "getContainerSize"
+ putMethod = "put"
+ deleteMethod = "delete"
+ getMethod = "get"
+ listMethod = "list"
+ containersOfMethod = "containersOf"
+ deletionInfoMethod = "deletionInfo"
// putNamedMethod is method name for container put with an alias. It is exported to provide custom fee.
putNamedMethod = "putNamed"
)
-var (
- errNilArgument = errors.New("empty argument")
-)
+var errNilArgument = errors.New("empty argument")
// NewFromMorph returns the wrapper instance from the raw morph client.
//
@@ -55,13 +46,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
opts[i](o)
}
- if o.feePutNamedSet {
- o.staticOpts = append(o.staticOpts, client.WithCustomFee(putNamedMethod, o.feePutNamed))
- }
-
- sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...)
+ sc, err := client.NewStatic(cli, contract, fee, *o...)
if err != nil {
- return nil, fmt.Errorf("can't create container static client: %w", err)
+ return nil, fmt.Errorf("create 'container' contract client: %w", err)
}
return &Client{client: sc}, nil
@@ -81,23 +68,10 @@ func (c Client) ContractAddress() util.Uint160 {
// parameter of Wrapper.
type Option func(*opts)
-type opts struct {
- feePutNamedSet bool
- feePutNamed fixedn.Fixed8
-
- staticOpts []client.StaticClientOption
-}
+type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- o.staticOpts = append(o.staticOpts, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
@@ -107,14 +81,6 @@ func TryNotary() Option {
// Considered to be used by IR nodes only.
func AsAlphabet() Option {
return func(o *opts) {
- o.staticOpts = append(o.staticOpts, client.AsAlphabet())
- }
-}
-
-// WithCustomFeeForNamedPut returns option to specify custom fee for each Put operation with named container.
-func WithCustomFeeForNamedPut(fee fixedn.Fixed8) Option {
- return func(o *opts) {
- o.feePutNamed = fee
- o.feePutNamedSet = true
+ *o = append(*o, client.AsAlphabet())
}
}
diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go
new file mode 100644
index 0000000000..60fb8ad7c4
--- /dev/null
+++ b/pkg/morph/client/container/containers_of.go
@@ -0,0 +1,67 @@
+package container
+
+import (
+ "context"
+ "errors"
+
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+// ContainersOf returns a list of container identifiers belonging
+// to the specified user of the FrostFS system. If idUser is nil, it returns
+// the list of all containers.
+//
+// If the remote RPC node does not support the neo-go session API, it falls
+// back to plain list iteration.
+func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) {
+ var cidList []cid.ID
+ var err error
+
+ cb := func(id cid.ID) error {
+ cidList = append(cidList, id)
+ return nil
+ }
+ if err = c.IterateContainersOf(ctx, idUser, cb); err != nil {
+ return nil, err
+ }
+ return cidList, nil
+}
+
+// IterateContainersOf iterates over the container identifiers
+// belonging to the specified user of the FrostFS system and executes
+// `cb` on each of them. If idUser is nil, it iterates over all containers.
+func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error {
+ var rawID []byte
+ if idUser != nil {
+ rawID = idUser.WalletBytes()
+ }
+
+ itemCb := func(item stackitem.Item) error {
+ id, err := getCIDfromStackItem(item)
+ if err != nil {
+ return err
+ }
+ if err = cb(id); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // We would like to have batch size as big as possible,
+ // to reduce the number of round-trips and avoid creating sessions.
+ // The limit depends on 2 things:
+ // 1. VM limits: max 2048 items on stack.
+ // 2. JSON encoded size for the item with type = 128k.
+ // It turns out, that for container ID the second limit is hit first,
+ // 512 is big enough value and it is beautiful.
+ const batchSize = 512
+
+ cnrHash := c.client.ContractAddress()
+ err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
+ if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
+ return c.iterate(ctx, idUser, cb)
+ }
+
+ return err
+}
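
An early-termination sketch for IterateContainersOf; since the callback's error is propagated as-is, a sentinel lets the caller stop after the first hit (names are illustrative):

```go
package cntexample

import (
	"context"
	"errors"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
)

// errStop aborts iteration; the wrapper closes any server-side session
// on the way out and hands the sentinel back unchanged.
var errStop = errors.New("stop iteration")

// firstContainer returns the first container ID owned by owner
// (or any container at all when owner is nil).
func firstContainer(ctx context.Context, c *container.Client, owner *user.ID) (cid.ID, bool, error) {
	var first cid.ID
	var found bool
	err := c.IterateContainersOf(ctx, owner, func(id cid.ID) error {
		first, found = id, true
		return errStop
	})
	if err != nil && !errors.Is(err, errStop) {
		return cid.ID{}, false, err
	}
	return first, found, nil
}
```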
diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go
index c9105a3ca1..09912efa52 100644
--- a/pkg/morph/client/container/delete.go
+++ b/pkg/morph/client/container/delete.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
@@ -12,20 +13,22 @@ import (
// along with signature and session token.
//
// Returns error if container ID is nil.
-func Delete(c *Client, witness core.RemovalWitness) error {
+func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
binCnr := make([]byte, sha256.Size)
- witness.ContainerID().Encode(binCnr)
+ witness.ContainerID.Encode(binCnr)
var prm DeletePrm
prm.SetCID(binCnr)
- prm.SetSignature(witness.Signature())
+ prm.SetSignature(witness.Signature.GetSign())
+ prm.SetKey(witness.Signature.GetKey())
- if tok := witness.SessionToken(); tok != nil {
+ if tok := witness.SessionToken; tok != nil {
prm.SetToken(tok.Marshal())
}
- return c.Delete(prm)
+ _, err := c.Delete(ctx, prm)
+ return err
}
// DeletePrm groups parameters of Delete client operation.
@@ -33,6 +36,7 @@ type DeletePrm struct {
cnr []byte
signature []byte
token []byte
+ key []byte
client.InvokePrmOptional
}
@@ -52,26 +56,29 @@ func (d *DeletePrm) SetToken(token []byte) {
d.token = token
}
+// SetKey sets public key.
+func (d *DeletePrm) SetKey(key []byte) {
+ d.key = key
+}
+
// Delete removes the container from FrostFS system
// through Container contract call.
//
-// Returns any error encountered that caused
+// Returns valid until block and any error encountered that caused
// the removal to interrupt.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Delete(p DeletePrm) error {
- if len(p.signature) == 0 {
- return errNilArgument
+func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
+ if len(p.signature) == 0 && !p.IsControl() {
+ return 0, errNilArgument
}
prm := client.InvokePrm{}
prm.SetMethod(deleteMethod)
- prm.SetArgs(p.cnr, p.signature, p.token)
+ prm.SetArgs(p.cnr, p.signature, p.key, p.token)
prm.InvokePrmOptional = p.InvokePrmOptional
- err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
+ return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
}
- return nil
+ return res.VUB, nil
}
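
A sketch of the updated removal flow: Delete now reports the transaction's valid-until-block (VUB), and the signer's public key travels as a separate argument. Inputs are placeholders for an encoded container ID and its removal signature:

```go
package cntexample

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
)

// removeContainer submits the deletion and returns the VUB so the caller
// can bound how long to wait for the transaction to be persisted.
func removeContainer(ctx context.Context, c *container.Client, binCnr, sig, pub []byte) (uint32, error) {
	var prm container.DeletePrm
	prm.SetCID(binCnr)
	prm.SetSignature(sig)
	prm.SetKey(pub)

	vub, err := c.Delete(ctx, prm)
	if err != nil {
		return 0, fmt.Errorf("delete container: %w", err)
	}
	return vub, nil
}
```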
diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go
new file mode 100644
index 0000000000..90bcdd7d5c
--- /dev/null
+++ b/pkg/morph/client/container/deletion_info.go
@@ -0,0 +1,76 @@
+package container
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "strings"
+
+ containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/mr-tron/base58"
+)
+
+func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) {
+ return DeletionInfo(ctx, (*Client)(x), cnr)
+}
+
+type deletionInfo interface {
+ DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error)
+}
+
+func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
+ binCnr := make([]byte, sha256.Size)
+ cnr.Encode(binCnr)
+
+ return c.DeletionInfo(ctx, binCnr)
+}
+
+func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) {
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(deletionInfoMethod)
+ prm.SetArgs(cid)
+
+ res, err := c.client.TestInvoke(ctx, prm)
+ if err != nil {
+ if strings.Contains(err.Error(), containerContract.NotFoundError) {
+ return nil, new(apistatus.ContainerNotFound)
+ }
+ return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err)
+ } else if ln := len(res); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
+ }
+
+ arr, err := client.ArrayFromStackItem(res[0])
+ if err != nil {
+ return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err)
+ }
+
+ if len(arr) != 2 {
+ return nil, fmt.Errorf("unexpected container stack item count (%s): %d", deletionInfoMethod, len(arr))
+ }
+
+ rawOwner, err := client.BytesFromStackItem(arr[0])
+ if err != nil {
+ return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err)
+ }
+
+ var owner user.ID
+ if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil {
+ return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err)
+ }
+
+ epoch, err := client.BigIntFromStackItem(arr[1])
+ if err != nil {
+ return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err)
+ }
+
+ return &containercore.DelInfo{
+ Owner: owner,
+ Epoch: epoch.Uint64(),
+ }, nil
+}
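
A reading sketch for the new deletion record: DeletionInfo yields the former owner and the removal epoch, and a missing record surfaces as apistatus.ContainerNotFound (helper and import aliases are illustrative):

```go
package cntexample

import (
	"context"
	"errors"

	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"

	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
)

// deletionRecord distinguishes "no record / unknown container" from other
// failures when asking the contract about a removed container.
func deletionRecord(ctx context.Context, c *cntClient.Client, id cid.ID) (*containercore.DelInfo, bool, error) {
	info, err := cntClient.DeletionInfo(ctx, c, id)
	if err != nil {
		var notFound *apistatus.ContainerNotFound
		if errors.As(err, &notFound) {
			return nil, false, nil // no deletion record for this container
		}
		return nil, false, err
	}
	return info, true, nil
}
```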
diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go
deleted file mode 100644
index 56c36c1796..0000000000
--- a/pkg/morph/client/container/eacl.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package container
-
-import (
- "crypto/sha256"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-)
-
-// GetEACL reads the extended ACL table from FrostFS system
-// through Container contract call.
-//
-// Returns apistatus.EACLNotFound if eACL table is missing in the contract.
-func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) {
- binCnr := make([]byte, sha256.Size)
- cnr.Encode(binCnr)
-
- prm := client.TestInvokePrm{}
- prm.SetMethod(eaclMethod)
- prm.SetArgs(binCnr)
-
- prms, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln)
- }
-
- arr, err := client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err)
- }
-
- if len(arr) != 4 {
- return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr))
- }
-
- rawEACL, err := client.BytesFromStackItem(arr[0])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err)
- }
-
- sig, err := client.BytesFromStackItem(arr[1])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err)
- }
-
- // Client may not return errors if the table is missing, so check this case additionally.
- // The absence of a signature in the response can be taken as an eACL absence criterion,
- // since unsigned table cannot be approved in the storage by design.
- if len(sig) == 0 {
- var errEACLNotFound apistatus.EACLNotFound
-
- return nil, errEACLNotFound
- }
-
- pub, err := client.BytesFromStackItem(arr[2])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err)
- }
-
- binToken, err := client.BytesFromStackItem(arr[3])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err)
- }
-
- var res container.EACL
-
- res.Value = eacl.NewTable()
- if err = res.Value.Unmarshal(rawEACL); err != nil {
- return nil, err
- }
-
- if len(binToken) > 0 {
- res.Session = new(session.Container)
-
- err = res.Session.Unmarshal(binToken)
- if err != nil {
- return nil, fmt.Errorf("could not unmarshal session token: %w", err)
- }
- }
-
- // TODO(@cthulhu-rider): #1387 implement and use another approach to avoid conversion
- var sigV2 refs.Signature
- sigV2.SetKey(pub)
- sigV2.SetSign(sig)
- sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256)
-
- err = res.Signature.ReadFromV2(sigV2)
- return &res, err
-}
diff --git a/pkg/morph/client/container/eacl_set.go b/pkg/morph/client/container/eacl_set.go
deleted file mode 100644
index 86eae4c2bf..0000000000
--- a/pkg/morph/client/container/eacl_set.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package container
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-// PutEACL marshals table, and passes it to Wrapper's PutEACLBinary method
-// along with sig.Key() and sig.Sign().
-//
-// Returns error if table is nil.
-//
-// If TryNotary is provided, calls notary contract.
-func PutEACL(c *Client, eaclInfo containercore.EACL) error {
- if eaclInfo.Value == nil {
- return errNilArgument
- }
-
- data, err := eaclInfo.Value.Marshal()
- if err != nil {
- return fmt.Errorf("can't marshal eacl table: %w", err)
- }
-
- var prm PutEACLPrm
- prm.SetTable(data)
-
- if eaclInfo.Session != nil {
- prm.SetToken(eaclInfo.Session.Marshal())
- }
-
- // TODO(@cthulhu-rider): #1387 implement and use another approach to avoid conversion
- var sigV2 refs.Signature
- eaclInfo.Signature.WriteToV2(&sigV2)
-
- prm.SetKey(sigV2.GetKey())
- prm.SetSignature(sigV2.GetSign())
-
- return c.PutEACL(prm)
-}
-
-// PutEACLPrm groups parameters of PutEACL operation.
-type PutEACLPrm struct {
- table []byte
- key []byte
- sig []byte
- token []byte
-
- client.InvokePrmOptional
-}
-
-// SetTable sets table.
-func (p *PutEACLPrm) SetTable(table []byte) {
- p.table = table
-}
-
-// SetKey sets key.
-func (p *PutEACLPrm) SetKey(key []byte) {
- p.key = key
-}
-
-// SetSignature sets signature.
-func (p *PutEACLPrm) SetSignature(sig []byte) {
- p.sig = sig
-}
-
-// SetToken sets session token.
-func (p *PutEACLPrm) SetToken(token []byte) {
- p.token = token
-}
-
-// PutEACL saves binary eACL table with its session token, key and signature
-// in FrostFS system through Container contract call.
-//
-// Returns any error encountered that caused the saving to interrupt.
-func (c *Client) PutEACL(p PutEACLPrm) error {
- if len(p.sig) == 0 || len(p.key) == 0 {
- return errNilArgument
- }
-
- prm := client.InvokePrm{}
- prm.SetMethod(setEACLMethod)
- prm.SetArgs(p.table, p.sig, p.key, p.token)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- err := c.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", setEACLMethod, err)
- }
- return nil
-}
diff --git a/pkg/morph/client/container/estimations.go b/pkg/morph/client/container/estimations.go
deleted file mode 100644
index 6adf674766..0000000000
--- a/pkg/morph/client/container/estimations.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package container
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-// StartEstimationPrm groups parameters of StartEstimation operation.
-type StartEstimationPrm struct {
- commonEstimationPrm
-}
-
-// StopEstimationPrm groups parameters of StopEstimation operation.
-type StopEstimationPrm struct {
- commonEstimationPrm
-}
-
-type commonEstimationPrm struct {
- epoch uint64
-
- client.InvokePrmOptional
-}
-
-// SetEpoch sets epoch.
-func (p *commonEstimationPrm) SetEpoch(epoch uint64) {
- p.epoch = epoch
-}
-
-// StartEstimation votes to produce start estimation notification.
-func (c *Client) StartEstimation(p StartEstimationPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(startEstimationMethod)
- prm.SetArgs(p.epoch)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- if err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", startEstimationMethod, err)
- }
- return nil
-}
-
-// StopEstimation votes to produce stop estimation notification.
-func (c *Client) StopEstimation(p StopEstimationPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(stopEstimationMethod)
- prm.SetArgs(p.epoch)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- if err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", stopEstimationMethod, err)
- }
- return nil
-}
diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go
index 8d68141a51..8622d2cdd7 100644
--- a/pkg/morph/client/container/get.go
+++ b/pkg/morph/client/container/get.go
@@ -1,15 +1,15 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -17,22 +17,26 @@ import (
type containerSource Client
-func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) {
- return Get((*Client)(x), cnr)
+func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) {
+ return Get(ctx, (*Client)(x), cnr)
}
// AsContainerSource provides container Source interface
// from Wrapper instance.
-func AsContainerSource(w *Client) core.Source {
+func AsContainerSource(w *Client) containercore.Source {
return (*containerSource)(w)
}
+type getContainer interface {
+ Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+}
+
// Get marshals container ID, and passes it to Wrapper's Get method.
-func Get(c *Client, cnr cid.ID) (*containercore.Container, error) {
+func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.Get(binCnr)
+ return c.Get(ctx, binCnr)
}
// Get reads the container from FrostFS system by binary identifier
@@ -40,26 +44,24 @@ func Get(c *Client, cnr cid.ID) (*containercore.Container, error) {
//
// If an empty slice is returned for the requested identifier,
// storage.ErrNotFound error is returned.
-func (c *Client) Get(cid []byte) (*containercore.Container, error) {
+func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(getMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
- var errNotFound apistatus.ContainerNotFound
-
- return nil, errNotFound
+ return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err)
}
if len(arr) != 4 {
@@ -68,29 +70,29 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) {
cnrBytes, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err)
}
sigBytes, err := client.BytesFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err)
}
pub, err := client.BytesFromStackItem(arr[2])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err)
}
tokBytes, err := client.BytesFromStackItem(arr[3])
if err != nil {
- return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err)
}
var cnr containercore.Container
if err := cnr.Value.Unmarshal(cnrBytes); err != nil {
// use another major version if there is any
- return nil, fmt.Errorf("can't unmarshal container: %w", err)
+ return nil, fmt.Errorf("unmarshal container: %w", err)
}
if len(tokBytes) > 0 {
@@ -98,11 +100,11 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) {
err = cnr.Session.Unmarshal(tokBytes)
if err != nil {
- return nil, fmt.Errorf("could not unmarshal session token: %w", err)
+ return nil, fmt.Errorf("unmarshal session token: %w", err)
}
}
- // TODO(@cthulhu-rider): #1387 implement and use another approach to avoid conversion
+ // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
var sigV2 refs.Signature
sigV2.SetKey(pub)
sigV2.SetSign(sigBytes)
diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go
index 8f165f4b1e..fc63d1beb2 100644
--- a/pkg/morph/client/container/list.go
+++ b/pkg/morph/client/container/list.go
@@ -1,20 +1,22 @@
package container
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
-// List returns a list of container identifiers belonging
+// iterate iterates through a list of container identifiers belonging
// to the specified user of FrostFS system. The list is composed
// through Container contract call.
//
-// Returns the identifiers of all FrostFS containers if pointer
+// Iterates through the identifiers of all FrostFS containers if pointer
// to user identifier is nil.
-func (c *Client) List(idUser *user.ID) ([]cid.ID, error) {
+func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error {
var rawID []byte
if idUser != nil {
@@ -25,34 +27,43 @@ func (c *Client) List(idUser *user.ID) ([]cid.ID, error) {
prm.SetMethod(listMethod)
prm.SetArgs(rawID)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err)
+ return fmt.Errorf("test invoke (%s): %w", listMethod, err)
} else if ln := len(res); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
+ return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
}
res, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err)
+ return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err)
}
- cidList := make([]cid.ID, 0, len(res))
for i := range res {
- rawID, err := client.BytesFromStackItem(res[i])
+ id, err := getCIDfromStackItem(res[i])
if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err)
+ return err
}
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
+ if err = cb(id); err != nil {
+ return err
}
-
- cidList = append(cidList, id)
}
- return cidList, nil
+ return nil
+}
+
+func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) {
+ rawID, err := client.BytesFromStackItem(item)
+ if err != nil {
+ return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err)
+ }
+
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
+ }
+ return id, nil
}
diff --git a/pkg/morph/client/container/load.go b/pkg/morph/client/container/load.go
deleted file mode 100644
index dcf89f73e9..0000000000
--- a/pkg/morph/client/container/load.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package container
-
-import (
- "crypto/sha256"
- "fmt"
-
- v2refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-)
-
-// AnnounceLoadPrm groups parameters of AnnounceLoad operation.
-type AnnounceLoadPrm struct {
- a container.SizeEstimation
- key []byte
-
- client.InvokePrmOptional
-}
-
-// SetAnnouncement sets announcement.
-func (a2 *AnnounceLoadPrm) SetAnnouncement(a container.SizeEstimation) {
- a2.a = a
-}
-
-// SetReporter sets public key of the reporter.
-func (a2 *AnnounceLoadPrm) SetReporter(key []byte) {
- a2.key = key
-}
-
-// AnnounceLoad saves container size estimation calculated by storage node
-// with key in FrostFS system through Container contract call.
-//
-// Returns any error encountered that caused the saving to interrupt.
-func (c *Client) AnnounceLoad(p AnnounceLoadPrm) error {
- binCnr := make([]byte, sha256.Size)
- p.a.Container().Encode(binCnr)
-
- prm := client.InvokePrm{}
- prm.SetMethod(putSizeMethod)
- prm.SetArgs(p.a.Epoch(), binCnr, p.a.Value(), p.key)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- err := c.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", putSizeMethod, err)
- }
- return nil
-}
-
-// EstimationID is an identity of container load estimation inside Container contract.
-type EstimationID []byte
-
-// ListLoadEstimationsByEpoch returns a list of container load estimations for to the specified epoch.
-// The list is composed through Container contract call.
-func (c *Client) ListLoadEstimationsByEpoch(epoch uint64) ([]EstimationID, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(listSizesMethod)
- invokePrm.SetArgs(epoch)
-
- prms, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listSizesMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", listSizesMethod, ln)
- }
-
- prms, err = client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listSizesMethod, err)
- }
-
- res := make([]EstimationID, 0, len(prms))
- for i := range prms {
- id, err := client.BytesFromStackItem(prms[i])
- if err != nil {
- return nil, fmt.Errorf("could not get ID byte array from stack item (%s): %w", listSizesMethod, err)
- }
-
- res = append(res, id)
- }
-
- return res, nil
-}
-
-// Estimation is a structure of single container load estimation
-// reported by storage node.
-type Estimation struct {
- Size uint64
-
- Reporter []byte
-}
-
-// Estimations is a structure of grouped container load estimation inside Container contract.
-type Estimations struct {
- ContainerID cid.ID
-
- Values []Estimation
-}
-
-// GetUsedSpaceEstimations returns a list of container load estimations by ID.
-// The list is composed through Container contract call.
-func (c *Client) GetUsedSpaceEstimations(id EstimationID) (*Estimations, error) {
- prm := client.TestInvokePrm{}
- prm.SetMethod(getSizeMethod)
- prm.SetArgs([]byte(id))
-
- prms, err := c.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getSizeMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", getSizeMethod, ln)
- }
-
- prms, err = client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack items of estimation fields from stack item (%s): %w", getSizeMethod, err)
- } else if ln := len(prms); ln != 2 {
- return nil, fmt.Errorf("unexpected stack item count of estimations fields (%s)", getSizeMethod)
- }
-
- rawCnr, err := client.BytesFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get container ID byte array from stack item (%s): %w", getSizeMethod, err)
- }
-
- prms, err = client.ArrayFromStackItem(prms[1])
- if err != nil {
- return nil, fmt.Errorf("could not get estimation list array from stack item (%s): %w", getSizeMethod, err)
- }
-
- var cnr cid.ID
-
- err = cnr.Decode(rawCnr)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- v2 := new(v2refs.ContainerID)
- v2.SetValue(rawCnr)
- res := &Estimations{
- ContainerID: cnr,
- Values: make([]Estimation, 0, len(prms)),
- }
-
- for i := range prms {
- arr, err := client.ArrayFromStackItem(prms[i])
- if err != nil {
- return nil, fmt.Errorf("could not get estimation struct from stack item (%s): %w", getSizeMethod, err)
- } else if ln := len(arr); ln != 2 {
- return nil, fmt.Errorf("unexpected stack item count of estimation fields (%s)", getSizeMethod)
- }
-
- reporter, err := client.BytesFromStackItem(arr[0])
- if err != nil {
- return nil, fmt.Errorf("could not get reporter byte array from stack item (%s): %w", getSizeMethod, err)
- }
-
- sz, err := client.IntFromStackItem(arr[1])
- if err != nil {
- return nil, fmt.Errorf("could not get estimation size from stack item (%s): %w", getSizeMethod, err)
- }
-
- res.Values = append(res.Values, Estimation{
- Reporter: reporter,
- Size: uint64(sz),
- })
- }
-
- return res, nil
-}
diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go
index 2c97446c6a..3bb84eb87c 100644
--- a/pkg/morph/client/container/put.go
+++ b/pkg/morph/client/container/put.go
@@ -1,11 +1,12 @@
package container
import (
+ "context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
@@ -14,7 +15,7 @@ import (
// along with sig.Key() and sig.Sign().
//
// Returns error if container is nil.
-func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
+func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) {
data := cnr.Value.Marshal()
d := container.ReadDomain(cnr.Value)
@@ -28,14 +29,14 @@ func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
prm.SetToken(cnr.Session.Marshal())
}
- // TODO(@cthulhu-rider): #1387 implement and use another approach to avoid conversion
+ // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
var sigV2 refs.Signature
cnr.Signature.WriteToV2(&sigV2)
prm.SetKey(sigV2.GetKey())
prm.SetSignature(sigV2.GetSign())
- err := c.Put(prm)
+ err := c.Put(ctx, prm)
if err != nil {
return nil, err
}
@@ -93,9 +94,7 @@ func (p *PutPrm) SetZone(zone string) {
//
// Returns any error encountered that caused the saving to interrupt.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Put(p PutPrm) error {
+func (c *Client) Put(ctx context.Context, p PutPrm) error {
if len(p.sig) == 0 || len(p.key) == 0 {
return errNilArgument
}
@@ -116,9 +115,9 @@ func (c *Client) Put(p PutPrm) error {
prm.SetMethod(method)
prm.InvokePrmOptional = p.InvokePrmOptional
- err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", method, err)
+ return fmt.Errorf("invoke method (%s): %w", method, err)
}
return nil
}
diff --git a/pkg/morph/client/fee.go b/pkg/morph/client/fee.go
deleted file mode 100644
index 8a38c4f55c..0000000000
--- a/pkg/morph/client/fee.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client
-
-import "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-
-// setFeeForMethod sets fee for the operation executed using specified contract method.
-func (x *fees) setFeeForMethod(method string, fee fixedn.Fixed8) {
- if x.customFees == nil {
- x.customFees = make(map[string]fixedn.Fixed8, 1)
- }
-
- x.customFees[method] = fee
-}
-
-// fees represents source of per-operation fees.
-// Can be initialized using var declaration.
-//
-// Instances are not thread-safe, so they mean initially filling, and then only reading.
-type fees struct {
- defaultFee fixedn.Fixed8
-
- // customFees represents source of customized per-operation fees.
- customFees map[string]fixedn.Fixed8
-}
-
-// returns fee for the operation executed using specified contract method.
-// Returns customized value if it is set. Otherwise, returns default value.
-func (x fees) feeForMethod(method string) fixedn.Fixed8 {
- if x.customFees != nil {
- if fee, ok := x.customFees[method]; ok {
- return fee
- }
- }
-
- return x.defaultFee
-}
diff --git a/pkg/morph/client/fee_test.go b/pkg/morph/client/fee_test.go
deleted file mode 100644
index 963d64ce42..0000000000
--- a/pkg/morph/client/fee_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client
-
-import (
- "testing"
-
- "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/stretchr/testify/require"
-)
-
-func TestFees(t *testing.T) {
- var v fees
-
- const method = "some method"
-
- var (
- fee fixedn.Fixed8
- def = fixedn.Fixed8(13)
- )
-
- v.defaultFee = def
-
- fee = v.feeForMethod(method)
- require.True(t, fee.Equal(def))
-
- const customFee = fixedn.Fixed8(10)
-
- v.setFeeForMethod(method, customFee)
-
- fee = v.feeForMethod(method)
-
- require.Equal(t, customFee, fee)
-}
diff --git a/pkg/morph/client/frostfs/bind.go b/pkg/morph/client/frostfs/bind.go
deleted file mode 100644
index 5b15d5c7b4..0000000000
--- a/pkg/morph/client/frostfs/bind.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package frostfscontract
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-type commonBindArgs struct {
- scriptHash []byte // script hash of account identifier
-
- keys [][]byte // list of serialized public keys
-
- client.InvokePrmOptional
-}
-
-// SetOptionalPrm sets optional client parameters.
-func (x *commonBindArgs) SetOptionalPrm(op client.InvokePrmOptional) {
- x.InvokePrmOptional = op
-}
-
-// SetScriptHash sets script hash of the FrostFS account identifier.
-func (x *commonBindArgs) SetScriptHash(v []byte) {
- x.scriptHash = v
-}
-
-// SetKeys sets a list of public keys in a binary format.
-func (x *commonBindArgs) SetKeys(v [][]byte) {
- x.keys = v
-}
-
-// BindKeysPrm groups parameters of BindKeys operation.
-type BindKeysPrm struct {
- commonBindArgs
-}
-
-// BindKeys binds list of public keys from FrostFS account by script hash.
-func (x *Client) BindKeys(p BindKeysPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(bindKeysMethod)
- prm.SetArgs(p.scriptHash, p.keys)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- err := x.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", bindKeysMethod, err)
- }
-
- return nil
-}
-
-// UnbindKeysPrm groups parameters of UnbindKeys operation.
-type UnbindKeysPrm struct {
- commonBindArgs
-}
-
-// UnbindKeys invokes the call of key unbinding method
-// of FrostFS contract.
-func (x *Client) UnbindKeys(args UnbindKeysPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(unbindKeysMethod)
- prm.SetArgs(args.scriptHash, args.keys)
- prm.InvokePrmOptional = args.InvokePrmOptional
-
- err := x.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", unbindKeysMethod, err)
- }
-
- return nil
-}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
index e8f4f7f18c..d3eba76392 100644
--- a/pkg/morph/client/frostfs/cheque.go
+++ b/pkg/morph/client/frostfs/cheque.go
@@ -1,6 +1,8 @@
package frostfscontract
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -37,13 +39,14 @@ func (c *ChequePrm) SetLock(lock util.Uint160) {
}
// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(p ChequePrm) error {
+func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(chequeMethod)
prm.SetArgs(p.id, p.user, p.amount, p.lock)
prm.InvokePrmOptional = p.InvokePrmOptional
- return x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
+ return err
}
// AlphabetUpdatePrm groups parameters of AlphabetUpdate operation.
@@ -65,11 +68,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
}
// AlphabetUpdate update list of alphabet nodes.
-func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error {
+func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(alphabetUpdateMethod)
prm.SetArgs(p.id, p.pubs)
prm.InvokePrmOptional = p.InvokePrmOptional
- return x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
+ return err
}
diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go
index 3e3e70ec02..cd6a9849ec 100644
--- a/pkg/morph/client/frostfs/client.go
+++ b/pkg/morph/client/frostfs/client.go
@@ -21,8 +21,6 @@ type Client struct {
}
const (
- bindKeysMethod = "bind"
- unbindKeysMethod = "unbind"
alphabetUpdateMethod = "alphabetUpdate"
chequeMethod = "cheque"
)
@@ -37,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err)
+ return nil, fmt.Errorf("create 'frostfs' contract client: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/addrm_keys.go b/pkg/morph/client/frostfsid/addrm_keys.go
deleted file mode 100644
index cbbd05142d..0000000000
--- a/pkg/morph/client/frostfsid/addrm_keys.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package frostfsid
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-type CommonBindPrm struct {
- ownerID []byte // FrostFS account identifier
-
- keys [][]byte // list of serialized public keys
-
- client.InvokePrmOptional
-}
-
-func (x *CommonBindPrm) SetOptionalPrm(prm client.InvokePrmOptional) {
- x.InvokePrmOptional = prm
-}
-
-// SetOwnerID sets FrostFS account identifier.
-func (x *CommonBindPrm) SetOwnerID(v []byte) {
- x.ownerID = v
-}
-
-// SetKeys sets a list of public keys in a binary format.
-func (x *CommonBindPrm) SetKeys(v [][]byte) {
- x.keys = v
-}
-
-// AddKeys adds a list of public keys to/from FrostFS account.
-func (x *Client) AddKeys(p CommonBindPrm) error {
- prm := client.InvokePrm{}
-
- prm.SetMethod(addKeysMethod)
- prm.SetArgs(p.ownerID, p.keys)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- err := x.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", addKeysMethod, err)
- }
-
- return nil
-}
-
-// RemoveKeys removes a list of public keys to/from FrostFS account.
-func (x *Client) RemoveKeys(args CommonBindPrm) error {
- prm := client.InvokePrm{}
-
- prm.SetMethod(removeKeysMethod)
- prm.SetArgs(args.ownerID, args.keys)
- prm.InvokePrmOptional = args.InvokePrmOptional
-
- err := x.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", removeKeysMethod, err)
- }
-
- return nil
-}
diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go
index dded61aff1..61eb03f099 100644
--- a/pkg/morph/client/frostfsid/client.go
+++ b/pkg/morph/client/frostfsid/client.go
@@ -3,6 +3,7 @@ package frostfsid
import (
"fmt"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -20,53 +21,14 @@ type Client struct {
client *client.StaticClient // static FrostFS ID contract client
}
-const (
- keyListingMethod = "key"
- addKeysMethod = "addKey"
- removeKeysMethod = "removeKey"
-)
+var _ frostfsidcore.SubjectProvider = (*Client)(nil)
// NewFromMorph wraps client to work with FrostFS ID contract.
-func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) {
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
+func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) {
+ sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet())
if err != nil {
- return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err)
+ return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err)
}
return &Client{client: sc}, nil
}
-
-// Option allows to set an optional
-// parameter of ClientWrapper.
-type Option func(*opts)
-
-type opts []client.StaticClientOption
-
-func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
-}
-
-// AsAlphabet returns option to sign main TX
-// of notary requests with client's private
-// key.
-//
-// Considered to be used by IR nodes only.
-func AsAlphabet() Option {
- return func(o *opts) {
- *o = append(*o, client.AsAlphabet())
- }
-}
diff --git a/pkg/morph/client/frostfsid/keys.go b/pkg/morph/client/frostfsid/keys.go
deleted file mode 100644
index 3bae7adfba..0000000000
--- a/pkg/morph/client/frostfsid/keys.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package frostfsid
-
-import (
- "crypto/elliptic"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// AccountKeysPrm groups parameters of AccountKeys operation.
-type AccountKeysPrm struct {
- id user.ID
-}
-
-// SetID sets owner ID.
-func (a *AccountKeysPrm) SetID(id user.ID) {
- a.id = id
-}
-
-// AccountKeys requests public keys of FrostFS account from FrostFS ID contract.
-func (x *Client) AccountKeys(p AccountKeysPrm) (keys.PublicKeys, error) {
- prm := client.TestInvokePrm{}
- prm.SetMethod(keyListingMethod)
- prm.SetArgs(p.id.WalletBytes())
-
- items, err := x.client.TestInvoke(prm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", keyListingMethod, err)
- } else if ln := len(items); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", keyListingMethod, ln)
- }
-
- items, err = client.ArrayFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("1st stack item must be an array (%s)", keyListingMethod)
- }
-
- pubs := make(keys.PublicKeys, len(items))
- for i := range items {
- rawPub, err := client.BytesFromStackItem(items[i])
- if err != nil {
- return nil, fmt.Errorf("invalid stack item, expected byte array (%s)", keyListingMethod)
- }
-
- pubs[i], err = keys.NewPublicKeyFromBytes(rawPub, elliptic.P256())
- if err != nil {
- return nil, fmt.Errorf("received invalid key (%s): %w", keyListingMethod, err)
- }
- }
-
- return pubs, nil
-}
diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go
new file mode 100644
index 0000000000..3a789672a3
--- /dev/null
+++ b/pkg/morph/client/frostfsid/subject.go
@@ -0,0 +1,74 @@
+package frostfsid
+
+import (
+ "context"
+ "fmt"
+
+ frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+const (
+ methodGetSubject = "getSubject"
+ methodGetSubjectExtended = "getSubjectExtended"
+)
+
+func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(methodGetSubject)
+ prm.SetArgs(addr)
+
+ res, err := c.client.TestInvoke(ctx, prm)
+ if err != nil {
+ return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err)
+ }
+
+ structArr, err := checkStackItem(res)
+ if err != nil {
+ return nil, fmt.Errorf("invalid test invocation result (%s): %w", methodGetSubjectExtended, err)
+ }
+
+ subj, err := frostfsidclient.ParseSubject(structArr)
+ if err != nil {
+ return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
+ }
+
+ return subj, nil
+}
+
+func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(methodGetSubjectExtended)
+ prm.SetArgs(addr)
+
+ res, err := c.client.TestInvoke(ctx, prm)
+ if err != nil {
+ return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err)
+ }
+
+ structArr, err := checkStackItem(res)
+ if err != nil {
+ return nil, fmt.Errorf("invalid test invocation result (%s): %w", methodGetSubjectExtended, err)
+ }
+
+ subj, err := frostfsidclient.ParseSubjectExtended(structArr)
+ if err != nil {
+ return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
+ }
+
+ return subj, nil
+}
+
+func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error) {
+ if ln := len(res); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", methodGetSubject, ln)
+ }
+
+ structArr, err = client.ArrayFromStackItem(res[0])
+ if err != nil {
+ return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err)
+ }
+ return
+}
diff --git a/pkg/morph/client/mtls.go b/pkg/morph/client/mtls.go
new file mode 100644
index 0000000000..3de51afe78
--- /dev/null
+++ b/pkg/morph/client/mtls.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "crypto/tls"
+
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+)
+
+// MTLSConfig represents endpoint mTLS configuration.
+type MTLSConfig struct {
+ TrustedCAList []string
+ KeyFile string
+ CertFile string
+}
+
+func (m *MTLSConfig) parse() (*tls.Config, error) {
+ if m == nil {
+ return nil, nil
+ }
+
+ return rpcclient.TLSClientConfig(m.TrustedCAList, m.CertFile, m.KeyFile)
+}
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index e0eecd9295..b9e39c25e1 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -1,16 +1,20 @@
package client
import (
+ "context"
+ "slices"
"sort"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
// Endpoint represents morph endpoint together with its priority.
type Endpoint struct {
- Address string
- Priority int
+ Address string
+ Priority int
+ MTLSConfig *MTLSConfig
}
type endpoints struct {
@@ -27,7 +31,8 @@ func (e *endpoints) init(ee []Endpoint) {
e.list = ee
}
-func (c *Client) switchRPC() bool {
+// SwitchRPC performs reconnection and returns true if it was successful.
+func (c *Client) SwitchRPC(ctx context.Context) bool {
c.switchLock.Lock()
defer c.switchLock.Unlock()
@@ -35,11 +40,11 @@ func (c *Client) switchRPC() bool {
// Iterate endpoints in the order of decreasing priority.
for c.endpoints.curr = range c.endpoints.list {
- newEndpoint := c.endpoints.list[c.endpoints.curr].Address
- cli, act, err := c.newCli(newEndpoint)
+ newEndpoint := c.endpoints.list[c.endpoints.curr]
+ cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
- c.logger.Warn("could not establish connection to the switched RPC node",
- zap.String("endpoint", newEndpoint),
+ c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
+ zap.String("endpoint", newEndpoint.Address),
zap.Error(err),
)
@@ -48,18 +53,8 @@ func (c *Client) switchRPC() bool {
c.cache.invalidate()
- c.logger.Info("connection to the new RPC node has been established",
- zap.String("endpoint", newEndpoint))
-
- if !c.restoreSubscriptions(cli, newEndpoint) {
- // new WS client does not allow
- // restoring subscription, client
- // could not work correctly =>
- // closing connection to RPC node
- // to switch to another one
- cli.Close()
- continue
- }
+ c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
+ zap.String("endpoint", newEndpoint.Address))
c.client = cli
c.setActor(act)
@@ -67,84 +62,32 @@ func (c *Client) switchRPC() bool {
if c.cfg.switchInterval != 0 && !c.switchIsActive.Load() &&
c.endpoints.list[c.endpoints.curr].Priority != c.endpoints.list[0].Priority {
c.switchIsActive.Store(true)
- go c.switchToMostPrioritized()
+ go c.switchToMostPrioritized(ctx)
}
return true
}
+ c.inactive = true
+
+ if c.cfg.inactiveModeCb != nil {
+ c.cfg.inactiveModeCb()
+ }
return false
}
-func (c *Client) notificationLoop() {
- for {
- c.switchLock.RLock()
- nChan := c.client.Notifications
- c.switchLock.RUnlock()
-
- select {
- case <-c.cfg.ctx.Done():
- _ = c.UnsubscribeAll()
- c.close()
-
- return
- case <-c.closeChan:
- _ = c.UnsubscribeAll()
- c.close()
-
- return
- case n, ok := <-nChan:
- // notification channel is used as a connection
- // state: if it is closed, the connection is
- // considered to be lost
- if !ok {
- if closeErr := c.client.GetError(); closeErr != nil {
- c.logger.Warn("switching to the next RPC node",
- zap.String("reason", closeErr.Error()),
- )
- } else {
- // neo-go client was closed by calling `Close`
- // method that happens only when the client has
- // switched to the more prioritized RPC
- continue
- }
-
- if !c.switchRPC() {
- c.logger.Error("could not establish connection to any RPC node")
-
- // could not connect to all endpoints =>
- // switch client to inactive mode
- c.inactiveMode()
-
- return
- }
-
- // TODO(@carpawell): call here some callback retrieved in constructor
- // of the client to allow checking chain state since during switch
- // process some notification could be lost
-
- continue
- }
-
- select {
- case c.notifications <- n:
- continue
- case <-c.cfg.ctx.Done():
- _ = c.UnsubscribeAll()
- c.close()
-
- return
- case <-c.closeChan:
- _ = c.UnsubscribeAll()
- c.close()
-
- return
- }
- }
+func (c *Client) closeWaiter(ctx context.Context) {
+ c.wg.Add(1)
+ defer c.wg.Done()
+ select {
+ case <-ctx.Done():
+ case <-c.closeChan:
}
+ _ = c.UnsubscribeAll()
+ c.close()
}
-func (c *Client) switchToMostPrioritized() {
+func (c *Client) switchToMostPrioritized(ctx context.Context) {
t := time.NewTicker(c.cfg.switchInterval)
defer t.Stop()
defer c.switchIsActive.Store(false)
@@ -152,15 +95,15 @@ func (c *Client) switchToMostPrioritized() {
mainLoop:
for {
select {
- case <-c.cfg.ctx.Done():
+ case <-ctx.Done():
return
case <-t.C:
c.switchLock.RLock()
- endpointsCopy := make([]Endpoint, len(c.endpoints.list))
- copy(endpointsCopy, c.endpoints.list)
+ endpointsCopy := slices.Clone(c.endpoints.list)
currPriority := c.endpoints.list[c.endpoints.curr].Priority
highestPriority := c.endpoints.list[0].Priority
+
c.switchLock.RUnlock()
if currPriority == highestPriority {
@@ -177,44 +120,37 @@ mainLoop:
tryE := e.Address
- cli, act, err := c.newCli(tryE)
+ cli, act, err := c.newCli(ctx, e)
if err != nil {
- c.logger.Warn("could not create client to the higher priority node",
+ c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
continue
}
- if c.restoreSubscriptions(cli, tryE) {
- c.switchLock.Lock()
-
- // higher priority node could have been
- // connected in the other goroutine
- if e.Priority >= c.endpoints.list[c.endpoints.curr].Priority {
- cli.Close()
- c.switchLock.Unlock()
- return
- }
-
- c.client.Close()
- c.cache.invalidate()
- c.client = cli
- c.setActor(act)
- c.endpoints.curr = i
+ c.switchLock.Lock()
+ // higher priority node could have been
+ // connected in the other goroutine
+ if e.Priority >= c.endpoints.list[c.endpoints.curr].Priority {
+ cli.Close()
c.switchLock.Unlock()
-
- c.logger.Info("switched to the higher priority RPC",
- zap.String("endpoint", tryE))
-
return
}
- c.logger.Warn("could not restore side chain subscriptions using node",
- zap.String("endpoint", tryE),
- zap.Error(err),
- )
+ c.client.Close()
+ c.cache.invalidate()
+ c.client = cli
+ c.setActor(act)
+ c.endpoints.curr = i
+
+ c.switchLock.Unlock()
+
+ c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC,
+ zap.String("endpoint", tryE))
+
+ return
}
}
}
@@ -222,6 +158,7 @@ mainLoop:
// close closes notification channel and wrapped WS client.
func (c *Client) close() {
- close(c.notifications)
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
c.client.Close()
}
diff --git a/pkg/morph/client/multy_test.go b/pkg/morph/client/multy_test.go
index 4bc38c70ca..84a07b0a41 100644
--- a/pkg/morph/client/multy_test.go
+++ b/pkg/morph/client/multy_test.go
@@ -9,11 +9,11 @@ import (
)
func TestInitEndpoints(t *testing.T) {
- rand.Seed(time.Now().UnixNano())
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
ee := make([]Endpoint, 100)
for i := range ee {
- ee[i].Priority = rand.Int()
+ ee[i].Priority = r.Int()
}
var eeInternal endpoints
diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go
index eafa097e9e..de8afbfb52 100644
--- a/pkg/morph/client/netmap/client.go
+++ b/pkg/morph/client/netmap/client.go
@@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("can't create netmap static client: %w", err)
+ return nil, fmt.Errorf("create 'netmap' contract client: %w", err)
}
return &Client{client: sc}, nil
@@ -65,15 +65,7 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
+ return &opts{client.TryNotary()}
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 6b721cdfb9..3f6aed506d 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -1,9 +1,8 @@
package netmap
import (
- "errors"
+ "context"
"fmt"
- "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/encoding/bigint"
@@ -11,135 +10,59 @@ import (
)
const (
- maxObjectSizeConfig = "MaxObjectSize"
- basicIncomeRateConfig = "BasicIncomeRate"
- auditFeeConfig = "AuditFee"
- epochDurationConfig = "EpochDuration"
- containerFeeConfig = "ContainerFee"
- containerAliasFeeConfig = "ContainerAliasFee"
- etIterationsConfig = "EigenTrustIterations"
- etAlphaConfig = "EigenTrustAlpha"
- irCandidateFeeConfig = "InnerRingCandidateFee"
- withdrawFeeConfig = "WithdrawFee"
- homomorphicHashingDisabledKey = "HomomorphicHashingDisabled"
- maintenanceModeAllowedConfig = "MaintenanceModeAllowed"
+ MaxObjectSizeConfig = "MaxObjectSize"
+ MaxECParityCountConfig = "MaxECParityCount"
+ MaxECDataCountConfig = "MaxECDataCount"
+ EpochDurationConfig = "EpochDuration"
+ ContainerFeeConfig = "ContainerFee"
+ ContainerAliasFeeConfig = "ContainerAliasFee"
+ IrCandidateFeeConfig = "InnerRingCandidateFee"
+ WithdrawFeeConfig = "WithdrawFee"
+ HomomorphicHashingDisabledKey = "HomomorphicHashingDisabled"
+ MaintenanceModeAllowedConfig = "MaintenanceModeAllowed"
)
// MaxObjectSize receives max object size configuration
// value through the Netmap contract call.
-func (c *Client) MaxObjectSize() (uint64, error) {
- objectSize, err := c.readUInt64Config(maxObjectSizeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err)
- }
-
- return objectSize, nil
-}
-
-// BasicIncomeRate returns basic income rate configuration value from network
-// config in netmap contract.
-func (c *Client) BasicIncomeRate() (uint64, error) {
- rate, err := c.readUInt64Config(basicIncomeRateConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get basic income rate: %w", c, err)
- }
-
- return rate, nil
-}
-
-// AuditFee returns audit fee configuration value from network
-// config in netmap contract.
-func (c *Client) AuditFee() (uint64, error) {
- fee, err := c.readUInt64Config(auditFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get audit fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, MaxObjectSizeConfig)
}
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
-func (c *Client) EpochDuration() (uint64, error) {
- epochDuration, err := c.readUInt64Config(epochDurationConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err)
- }
-
- return epochDuration, nil
+func (c *Client) EpochDuration(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, EpochDurationConfig)
}
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
-func (c *Client) ContainerFee() (uint64, error) {
- fee, err := c.readUInt64Config(containerFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) ContainerFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, ContainerFeeConfig)
}
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
-func (c *Client) ContainerAliasFee() (uint64, error) {
- fee, err := c.readUInt64Config(containerAliasFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err)
- }
-
- return fee, nil
-}
-
-// EigenTrustIterations returns global configuration value of iteration cycles
-// for EigenTrust algorithm per epoch.
-func (c *Client) EigenTrustIterations() (uint64, error) {
- iterations, err := c.readUInt64Config(etIterationsConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get eigen trust iterations: %w", c, err)
- }
-
- return iterations, nil
-}
-
-// EigenTrustAlpha returns global configuration value of alpha parameter.
-// It receives the alpha as a string and tries to convert it to float.
-func (c *Client) EigenTrustAlpha() (float64, error) {
- strAlpha, err := c.readStringConfig(etAlphaConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get eigen trust alpha: %w", c, err)
- }
-
- return strconv.ParseFloat(strAlpha, 64)
+func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, ContainerAliasFeeConfig)
}
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
// settings.
//
// Returns (false, nil) if config key is not found in the contract.
-func (c *Client) HomomorphicHashDisabled() (bool, error) {
- return c.readBoolConfig(homomorphicHashingDisabledKey)
+func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) {
+ return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey)
}
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
-func (c *Client) InnerRingCandidateFee() (uint64, error) {
- fee, err := c.readUInt64Config(irCandidateFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, IrCandidateFeeConfig)
}
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
-func (c *Client) WithdrawFee() (uint64, error) {
- fee, err := c.readUInt64Config(withdrawFeeConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err)
- }
-
- return fee, nil
+func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
+ return c.readUInt64Config(ctx, WithdrawFeeConfig)
}
// MaintenanceModeAllowed reads admission of "maintenance" state from the
@@ -147,44 +70,32 @@ func (c *Client) WithdrawFee() (uint64, error) {
// that storage nodes are allowed to switch their state to "maintenance".
//
// By default, maintenance state is disallowed.
-func (c *Client) MaintenanceModeAllowed() (bool, error) {
- return c.readBoolConfig(maintenanceModeAllowedConfig)
+func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) {
+ return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig)
}
-func (c *Client) readUInt64Config(key string) (uint64, error) {
- v, err := c.config([]byte(key), IntegerAssert)
+func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) {
+ v, err := c.config(ctx, []byte(key))
+ if err != nil {
+ return 0, fmt.Errorf("read netconfig value '%s': %w", key, err)
+ }
+
+ bi, err := v.TryInteger()
if err != nil {
return 0, err
}
-
- // IntegerAssert is guaranteed to return int64 if the error is nil.
- return uint64(v.(int64)), nil
-}
-
-func (c *Client) readStringConfig(key string) (string, error) {
- v, err := c.config([]byte(key), StringAssert)
- if err != nil {
- return "", err
- }
-
- // StringAssert is guaranteed to return string if the error is nil.
- return v.(string), nil
+ return bi.Uint64(), nil
}
// reads boolean value by the given key from the FrostFS network configuration
// stored in the Sidechain. Returns false if key is not present.
-func (c *Client) readBoolConfig(key string) (bool, error) {
- v, err := c.config([]byte(key), BoolAssert)
+func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) {
+ v, err := c.config(ctx, []byte(key))
if err != nil {
- if errors.Is(err, ErrConfigNotFound) {
- return false, nil
- }
-
- return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err)
+ return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
}
- // BoolAssert is guaranteed to return bool if the error is nil.
- return v.(bool), nil
+ return v.TryBool()
}
// SetConfigPrm groups parameters of SetConfig operation.
@@ -212,13 +123,14 @@ func (s *SetConfigPrm) SetValue(value any) {
}
// SetConfig sets config field.
-func (c *Client) SetConfig(p SetConfigPrm) error {
+func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(setConfigMethod)
prm.SetArgs(p.id, p.key, p.value)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
+ return err
}
// RawNetworkParameter is a FrostFS network parameter which is transmitted but
@@ -236,20 +148,12 @@ type RawNetworkParameter struct {
type NetworkConfiguration struct {
MaxObjectSize uint64
- StoragePrice uint64
-
- AuditFee uint64
-
EpochDuration uint64
ContainerFee uint64
ContainerAliasFee uint64
- EigenTrustIterations uint64
-
- EigenTrustAlpha float64
-
IRCandidateFee uint64
WithdrawalFee uint64
@@ -262,14 +166,14 @@ type NetworkConfiguration struct {
}
// ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain.
-func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
+func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) {
var res NetworkConfiguration
prm := client.TestInvokePrm{}
prm.SetMethod(configListMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return res, fmt.Errorf("could not perform test invocation (%s): %w",
+ return res, fmt.Errorf("test invoke (%s): %w",
configListMethod, err)
}
@@ -299,32 +203,21 @@ func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
Name: name,
Value: value,
})
- case maxObjectSizeConfig:
+ case MaxObjectSizeConfig:
res.MaxObjectSize = bytesToUint64(value)
- case basicIncomeRateConfig:
- res.StoragePrice = bytesToUint64(value)
- case auditFeeConfig:
- res.AuditFee = bytesToUint64(value)
- case epochDurationConfig:
+ case EpochDurationConfig:
res.EpochDuration = bytesToUint64(value)
- case containerFeeConfig:
+ case ContainerFeeConfig:
res.ContainerFee = bytesToUint64(value)
- case containerAliasFeeConfig:
+ case ContainerAliasFeeConfig:
res.ContainerAliasFee = bytesToUint64(value)
- case etIterationsConfig:
- res.EigenTrustIterations = bytesToUint64(value)
- case etAlphaConfig:
- res.EigenTrustAlpha, err = strconv.ParseFloat(string(value), 64)
- if err != nil {
- return fmt.Errorf("invalid prm %s: %v", etAlphaConfig, err)
- }
- case irCandidateFeeConfig:
+ case IrCandidateFeeConfig:
res.IRCandidateFee = bytesToUint64(value)
- case withdrawFeeConfig:
+ case WithdrawFeeConfig:
res.WithdrawalFee = bytesToUint64(value)
- case homomorphicHashingDisabledKey:
+ case HomomorphicHashingDisabledKey:
res.HomomorphicHashingDisabled = bytesToBool(value)
- case maintenanceModeAllowedConfig:
+ case MaintenanceModeAllowedConfig:
res.MaintenanceModeAllowed = bytesToBool(value)
}
@@ -351,22 +244,18 @@ func bytesToBool(val []byte) bool {
return false
}
-// ErrConfigNotFound is returned when the requested key was not found
-// in the network config (returned value is `Null`).
-var ErrConfigNotFound = errors.New("config value not found")
-
// config performs the test invoke of get config value
// method of FrostFS Netmap contract.
-//
-// Returns ErrConfigNotFound if config key is not found in the contract.
-func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) {
+func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(configMethod)
prm.SetArgs(key)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
configMethod, err)
}
@@ -375,26 +264,7 @@ func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (a
configMethod, ln)
}
- if _, ok := items[0].(stackitem.Null); ok {
- return nil, ErrConfigNotFound
- }
-
- return assert(items[0])
-}
-
-// IntegerAssert converts stack item to int64.
-func IntegerAssert(item stackitem.Item) (any, error) {
- return client.IntFromStackItem(item)
-}
-
-// StringAssert converts stack item to string.
-func StringAssert(item stackitem.Item) (any, error) {
- return client.StringFromStackItem(item)
-}
-
-// BoolAssert converts stack item to bool.
-func BoolAssert(item stackitem.Item) (any, error) {
- return client.BoolFromStackItem(item)
+ return items[0], nil
}
// iterateRecords iterates over all config records and passes them to f.
diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go
index 92d569ae2c..8561329ecf 100644
--- a/pkg/morph/client/netmap/epoch.go
+++ b/pkg/morph/client/netmap/epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,13 +9,13 @@ import (
// Epoch receives number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) Epoch() (uint64, error) {
+func (c *Client) Epoch(ctx context.Context) (uint64, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(epochMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w",
+ return 0, fmt.Errorf("test invoke (%s): %w",
epochMethod, err)
}
@@ -25,20 +26,20 @@ func (c *Client) Epoch() (uint64, error) {
num, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err)
+ return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err)
}
return uint64(num), nil
}
// LastEpochBlock receives block number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) LastEpochBlock() (uint32, error) {
+func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(lastEpochBlockMethod)
- items, err := c.client.TestInvoke(prm)
+ items, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return 0, fmt.Errorf("could not perform test invocation (%s): %w",
+ return 0, fmt.Errorf("test invoke (%s): %w",
lastEpochBlockMethod, err)
}
@@ -49,7 +50,7 @@ func (c *Client) LastEpochBlock() (uint32, error) {
block, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("could not get number from stack item (%s): %w",
+ return 0, fmt.Errorf("get number from stack item (%s): %w",
lastEpochBlockMethod, err)
}
return uint32(block), nil
diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go
index 742165b9a3..0e1f9186ba 100644
--- a/pkg/morph/client/netmap/innerring.go
+++ b/pkg/morph/client/netmap/innerring.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"crypto/elliptic"
"fmt"
@@ -23,7 +24,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) {
}
// UpdateInnerRing updates inner ring keys.
-func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
+func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
args := make([][]byte, len(p.keys))
for i := range args {
args[i] = p.keys[i].Bytes()
@@ -34,17 +35,18 @@ func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
prm.SetArgs(args)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
+ return err
}
// GetInnerRingList return current IR list.
-func (c *Client) GetInnerRingList() (keys.PublicKeys, error) {
+func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(innerRingListMethod)
- prms, err := c.client.TestInvoke(invokePrm)
+ prms, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err)
}
return irKeysFromStackItem(prms, innerRingListMethod)
@@ -57,7 +59,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys
irs, err := client.ArrayFromStackItem(stack[0])
if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
+ return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err)
}
irKeys := make(keys.PublicKeys, len(irs))
@@ -77,7 +79,7 @@ const irNodeFixedPrmNumber = 1
func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
prms, err := client.ArrayFromStackItem(prm)
if err != nil {
- return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err)
+ return nil, fmt.Errorf("get stack item array (IRNode): %w", err)
} else if ln := len(prms); ln != irNodeFixedPrmNumber {
return nil, fmt.Errorf(
"unexpected stack item count (IRNode): expected %d, has %d",
@@ -88,7 +90,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
byteKey, err := client.BytesFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err)
+ return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err)
}
return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256())
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index 61bbf5f171..97782fc251 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
@@ -11,14 +12,14 @@ import (
// GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and
// decodes netmap.NetMap from the response.
-func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(epochSnapshotMethod)
invokePrm.SetArgs(epoch)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
epochSnapshotMethod, err)
}
@@ -34,13 +35,13 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
// GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo
// from the response.
-func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
+func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapCandidatesMethod)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err)
+ return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err)
}
if len(res) > 0 {
@@ -51,13 +52,13 @@ func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
}
// NetMap calls "netmap" method and decode netmap.NetMap from the response.
-func (c *Client) NetMap() (*netmap.NetMap, error) {
+func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapMethod)
- res, err := c.client.TestInvoke(invokePrm)
+ res, err := c.client.TestInvoke(ctx, invokePrm)
if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w",
+ return nil, fmt.Errorf("test invoke (%s): %w",
netMapMethod, err)
}
@@ -136,11 +137,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error {
default:
return fmt.Errorf("unsupported state %v", node.State)
case netmapcontract.NodeStateOnline:
- dst.SetOnline()
+ dst.SetStatus(netmap.Online)
case netmapcontract.NodeStateOffline:
- dst.SetOffline()
+ dst.SetStatus(netmap.Offline)
case netmapcontract.NodeStateMaintenance:
- dst.SetMaintenance()
+ dst.SetStatus(netmap.Maintenance)
}
return nil
diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go
index d6172a6825..e686e271e2 100644
--- a/pkg/morph/client/netmap/netmap_test.go
+++ b/pkg/morph/client/netmap/netmap_test.go
@@ -1,8 +1,8 @@
package netmap
import (
+ "crypto/rand"
"math/big"
- "math/rand"
"strconv"
"testing"
@@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
switch i % 3 {
default:
- expected[i].SetOffline()
+ expected[i].SetStatus(netmap.Offline)
case int(netmapcontract.NodeStateOnline):
- expected[i].SetOnline()
+ expected[i].SetStatus(netmap.Online)
case int(netmapcontract.NodeStateMaintenance):
- expected[i].SetMaintenance()
+ expected[i].SetStatus(netmap.Maintenance)
}
expected[i].SetPublicKey(pub)
@@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
var state int64
- switch {
- case expected[i].IsOnline():
+ switch expected[i].Status() {
+ case netmap.Online:
state = int64(netmapcontract.NodeStateOnline)
- case expected[i].IsOffline():
+ case netmap.Offline:
state = int64(netmapcontract.NodeStateOffline)
- case expected[i].IsMaintenance():
+ case netmap.Maintenance:
state = int64(netmapcontract.NodeStateMaintenance)
}
diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go
index 0b4d31b1db..341b20935d 100644
--- a/pkg/morph/client/netmap/new_epoch.go
+++ b/pkg/morph/client/netmap/new_epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,13 +9,32 @@ import (
// NewEpoch updates FrostFS epoch number through
// Netmap contract call.
-func (c *Client) NewEpoch(epoch uint64) error {
+func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
- if err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
+ _, err := c.client.Invoke(ctx, prm)
+ if err != nil {
+ return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
}
return nil
}
+
+// NewEpochControl updates the FrostFS epoch number through a control
+// notary transaction internally, to ensure that all nodes produce an
+// identical transaction with high probability.
+// If vub > 0, vub will be used as the valid-until-block value.
+func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) {
+ prm := client.InvokePrm{}
+ prm.SetMethod(newEpochMethod)
+ prm.SetArgs(epoch)
+ prm.SetControlTX(true)
+ prm.SetVUB(vub)
+
+ res, err := c.client.Invoke(ctx, prm)
+ if err != nil {
+ return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
+ }
+ return res.VUB, nil
+}
diff --git a/pkg/morph/client/netmap/add_peer.go b/pkg/morph/client/netmap/peer.go
similarity index 50%
rename from pkg/morph/client/netmap/add_peer.go
rename to pkg/morph/client/netmap/peer.go
index dc6c255409..e83acde390 100644
--- a/pkg/morph/client/netmap/add_peer.go
+++ b/pkg/morph/client/netmap/peer.go
@@ -1,12 +1,16 @@
package netmap
import (
+ "context"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
+var errFailedToRemovePeerWithoutNotary = errors.New("peer can be forcefully removed only in notary environment")
+
// AddPeerPrm groups parameters of AddPeer operation.
type AddPeerPrm struct {
nodeInfo netmap.NodeInfo
@@ -21,8 +25,8 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
// AddPeer registers peer in FrostFS network through
// Netmap contract call.
-func (c *Client) AddPeer(p AddPeerPrm) error {
- var method = addPeerMethod
+func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
+ method := addPeerMethod
if c.client.WithNotary() && c.client.IsAlpha() {
// In notary environments Alphabet must call AddPeerIR method instead of AddPeer.
@@ -36,8 +40,27 @@ func (c *Client) AddPeer(p AddPeerPrm) error {
prm.SetArgs(p.nodeInfo.Marshal())
prm.InvokePrmOptional = p.InvokePrmOptional
- if err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", method, err)
+ if _, err := c.client.Invoke(ctx, prm); err != nil {
+ return fmt.Errorf("invoke method (%s): %w", method, err)
}
return nil
}
+
+// ForceRemovePeer marks the given peer as offline via a notary control transaction.
+// If vub > 0, vub will be used as valid until block value.
+func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
+ if !c.client.WithNotary() {
+ return 0, errFailedToRemovePeerWithoutNotary
+ }
+
+ prm := UpdatePeerPrm{}
+ prm.SetKey(nodeInfo.PublicKey())
+ prm.SetControlTX(true)
+ prm.SetVUB(vub)
+
+ res, err := c.UpdatePeerState(ctx, prm)
+ if err != nil {
+ return 0, fmt.Errorf("updating peer state: %v", err)
+ }
+ return res.VUB, nil
+}
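A similar hedged sketch for the forced-removal path; `cli` construction is assumed to happen elsewhere:

package main

import (
	"context"
	"fmt"

	netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// forceOffline marks a node offline through the notary control path added
// above; vub=0 lets the client compute the validUntilBlock value itself.
func forceOffline(ctx context.Context, cli *netmapClient.Client, node netmap.NodeInfo) error {
	vub, err := cli.ForceRemovePeer(ctx, node, 0)
	if err != nil {
		return fmt.Errorf("force remove peer: %w", err)
	}
	fmt.Println("transaction valid until block", vub)
	return nil
}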
diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go
index ba2c26af78..9dbec1a90f 100644
--- a/pkg/morph/client/netmap/snapshot.go
+++ b/pkg/morph/client/netmap/snapshot.go
@@ -1,19 +1,22 @@
package netmap
import (
+ "context"
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
// GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response.
-func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(snapshotMethod)
prm.SetArgs(diff)
- res, err := c.client.TestInvoke(prm)
+ res, err := c.client.TestInvoke(ctx, prm)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err)
}
return DecodeNetMap(res)
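A minimal caller-side sketch of the updated signature (package alias and client wiring assumed):

package main

import (
	"context"

	netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// currentNetMap fetches the latest snapshot: diff=0 means "no epochs back".
func currentNetMap(ctx context.Context, cli *netmapClient.Client) (*netmap.NetMap, error) {
	return cli.GetNetMap(ctx, 0)
}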
diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go
index 02967453bb..f9f639c199 100644
--- a/pkg/morph/client/netmap/update_state.go
+++ b/pkg/morph/client/netmap/update_state.go
@@ -1,7 +1,7 @@
package netmap
import (
- "fmt"
+ "context"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -36,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() {
}
// UpdatePeerState changes peer status through Netmap contract call.
-func (c *Client) UpdatePeerState(p UpdatePeerPrm) error {
+func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) {
method := updateStateMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -55,8 +55,5 @@ func (c *Client) UpdatePeerState(p UpdatePeerPrm) error {
prm.SetArgs(int64(p.state), p.key)
prm.InvokePrmOptional = p.InvokePrmOptional
- if err := c.client.Invoke(prm); err != nil {
- return fmt.Errorf("could not invoke smart contract: %w", err)
- }
- return nil
+ return c.client.Invoke(ctx, prm)
}
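Since UpdatePeerState now returns client.InvokeRes, callers can await the transaction by its VUB. A hedged sketch using setters from the file above; client construction is assumed:

package main

import (
	"context"

	netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
)

// setMaintenance switches a peer to maintenance state and returns the VUB
// of the produced transaction; SetKey and SetMaintenance come from
// UpdatePeerPrm in this file.
func setMaintenance(ctx context.Context, cli *netmapClient.Client, key []byte) (uint32, error) {
	var prm netmapClient.UpdatePeerPrm
	prm.SetKey(key)
	prm.SetMaintenance()

	res, err := cli.UpdatePeerState(ctx, prm)
	if err != nil {
		return 0, err
	}
	return res.VUB, nil
}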
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index 0a23aa47ac..bc00eb8890 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -5,23 +5,20 @@ import (
"fmt"
"math/big"
"strconv"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const (
nnsContractID = 1 // NNS contract must be deployed first in the sidechain
- // NNSAuditContractName is a name of the audit contract in NNS.
- NNSAuditContractName = "audit.frostfs"
// NNSBalanceContractName is a name of the balance contract in NNS.
NNSBalanceContractName = "balance.frostfs"
// NNSContainerContractName is a name of the container contract in NNS.
@@ -32,20 +29,14 @@ const (
NNSNetmapContractName = "netmap.frostfs"
// NNSProxyContractName is a name of the proxy contract in NNS.
NNSProxyContractName = "proxy.frostfs"
- // NNSReputationContractName is a name of the reputation contract in NNS.
- NNSReputationContractName = "reputation.frostfs"
- // NNSSubnetworkContractName is a name of the subnet contract in NNS.
- NNSSubnetworkContractName = "subnet.frostfs"
// NNSGroupKeyName is a name for the FrostFS group key record in NNS.
NNSGroupKeyName = "group.frostfs"
+ // NNSPolicyContractName is a name of the policy contract in NNS.
+ NNSPolicyContractName = "policy.frostfs"
)
-var (
- // ErrNNSRecordNotFound means that there is no such record in NNS contract.
- ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
-
- errEmptyResultStack = errors.New("returned result stack is empty")
-)
+// ErrNNSRecordNotFound means that there is no such record in NNS contract.
+var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
@@ -64,89 +55,36 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {
return util.Uint160{}, ErrConnectionLost
}
- nnsHash, err := c.NNSHash()
- if err != nil {
- return util.Uint160{}, err
- }
-
- sh, err = nnsResolve(c.client, nnsHash, name)
+ sh, err = nnsResolve(c.nnsReader, name)
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
return sh, nil
}
-// NNSHash returns NNS contract hash.
-func (c *Client) NNSHash() (util.Uint160, error) {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- if c.inactive {
- return util.Uint160{}, ErrConnectionLost
- }
-
- nnsHash := c.cache.nns()
-
- if nnsHash == nil {
- cs, err := c.client.GetContractStateByID(nnsContractID)
- if err != nil {
- return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
- }
-
- c.cache.setNNSHash(cs.Hash)
- nnsHash = &cs.Hash
- }
- return *nnsHash, nil
-}
-
-func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
- found, err := exists(c, nnsHash, domain)
+func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
+ available, err := r.IsAvailable(domain)
if err != nil {
- return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err)
+ return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
}
- if !found {
+ if available {
return nil, ErrNNSRecordNotFound
}
- result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
- {
- Type: smartcontract.StringType,
- Value: domain,
- },
- {
- Type: smartcontract.IntegerType,
- Value: big.NewInt(int64(nns.TXT)),
- },
- }, nil)
- if err != nil {
- return nil, err
- }
- if result.State != vmstate.Halt.String() {
- return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
- }
- if len(result.Stack) == 0 {
- return nil, errEmptyResultStack
- }
- return result.Stack[0], nil
+ return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
}
-func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
- res, err := nnsResolveItem(c, nnsHash, domain)
+func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
+ arr, err := nnsResolveItem(r, domain)
if err != nil {
return util.Uint160{}, err
}
- // Parse the result of resolving NNS record.
- // It works with multiple formats (corresponding to multiple NNS versions).
- // If array of hashes is provided, it returns only the first one.
- if arr, ok := res.Value().([]stackitem.Item); ok {
- if len(arr) == 0 {
- return util.Uint160{}, errors.New("NNS record is missing")
- }
- res = arr[0]
+ if len(arr) == 0 {
+ return util.Uint160{}, errors.New("NNS record is missing")
}
- bs, err := res.TryBytes()
+ bs, err := arr[0].TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
@@ -166,33 +104,6 @@ func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (uti
return util.Uint160{}, errors.New("no valid hashes are found")
}
-func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
- result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
- {
- Type: smartcontract.StringType,
- Value: domain,
- },
- }, nil)
- if err != nil {
- return false, err
- }
-
- if len(result.Stack) == 0 {
- return false, errEmptyResultStack
- }
-
- res := result.Stack[0]
-
- available, err := res.TryBool()
- if err != nil {
- return false, fmt.Errorf("malformed response: %w", err)
- }
-
- // not available means that it is taken
- // and, therefore, exists
- return !available, nil
-}
-
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
@@ -208,29 +119,40 @@ func (c *Client) SetGroupSignerScope() error {
return err
}
- c.signer.Scopes = transaction.CustomGroups
- c.signer.AllowedGroups = []*keys.PublicKey{pub}
+ // Don't modify c until the new actor is successfully constructed.
+ cfg := c.cfg
+ cfg.signer = &transaction.Signer{
+ Scopes: transaction.CustomGroups | transaction.CalledByEntry,
+ AllowedGroups: []*keys.PublicKey{pub},
+ }
+ rpcActor, err := newActor(c.client, c.acc, cfg)
+ if err != nil {
+ return err
+ }
+ c.cfg = cfg
+ c.setActor(rpcActor)
return nil
}
// contractGroupKey returns public key designating FrostFS contract group.
func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
+ success := false
+ startedAt := time.Now()
+ defer func() {
+ c.cache.metrics.AddMethodDuration("GroupKey", success, time.Since(startedAt))
+ }()
+
if gKey := c.cache.groupKey(); gKey != nil {
+ success = true
return gKey, nil
}
- nnsHash, err := c.NNSHash()
+ arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
if err != nil {
return nil, err
}
- item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
- if err != nil {
- return nil, err
- }
-
- arr, ok := item.Value().([]stackitem.Item)
- if !ok || len(arr) == 0 {
+ if len(arr) == 0 {
return nil, errors.New("NNS record is missing")
}
@@ -245,5 +167,7 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
}
c.cache.setGroupKey(pub)
+
+ success = true
return pub, nil
}
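The resolution path now goes through the typed ContractReader instead of raw InvokeFunction calls. A standalone sketch of the same flow, assuming the reader is constructed elsewhere from an invoker:

package main

import (
	"fmt"
	"math/big"

	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
	nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
)

// resolveTXT mirrors nnsResolveItem above: an available domain has no
// record registered, so availability means "not found".
func resolveTXT(r *nnsClient.ContractReader, domain string) error {
	available, err := r.IsAvailable(domain)
	if err != nil {
		return fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
	}
	if available {
		return fmt.Errorf("record %s has not been found in NNS contract", domain)
	}
	items, err := r.Resolve(domain, big.NewInt(int64(nns.TXT)))
	if err != nil {
		return err
	}
	fmt.Println("resolved TXT items:", len(items))
	return nil
}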
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index a1980e2d9b..4487026132 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -1,13 +1,17 @@
package client
import (
+ "context"
+ "crypto/elliptic"
"encoding/binary"
"errors"
"fmt"
"math"
"math/big"
"strings"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -16,30 +20,31 @@ import (
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/neorpc"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/vm"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"go.uber.org/zap"
)
type (
notaryInfo struct {
- txValidTime uint32 // minimum amount of blocks when mainTx will be valid
- roundTime uint32 // extra amount of blocks to synchronize sidechain height diff of inner ring nodes
- fallbackTime uint32 // mainTx's ValidUntilBlock - fallbackTime + 1 is when fallbackTx is sent
+ txValidTime uint32 // minimum amount of blocks when mainTx will be valid
+ roundTime uint32 // extra amount of blocks to synchronize sidechain height diff of inner ring nodes
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
- notary util.Uint160
- proxy util.Uint160
+ proxy util.Uint160
}
notaryCfg struct {
proxy util.Uint160
- txValidTime, roundTime, fallbackTime uint32
+ txValidTime, roundTime uint32
alphabetSource AlphabetKeys
}
@@ -49,25 +54,18 @@ type (
)
const (
- defaultNotaryValidTime = 50
- defaultNotaryRoundTime = 100
- defaultNotaryFallbackTime = 40
+ defaultNotaryValidTime = 50
+ defaultNotaryRoundTime = 100
- notaryBalanceOfMethod = "balanceOf"
- notaryExpirationOfMethod = "expirationOf"
- setDesignateMethod = "designateAsRole"
+ setDesignateMethod = "designateAsRole"
- notaryBalanceErrMsg = "can't fetch notary balance"
notaryNotEnabledPanicMsg = "notary support was not enabled on this client"
)
-var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack")
-
func defaultNotaryConfig(c *Client) *notaryCfg {
return ¬aryCfg{
txValidTime: defaultNotaryValidTime,
roundTime: defaultNotaryRoundTime,
- fallbackTime: defaultNotaryFallbackTime,
alphabetSource: c.Committee,
}
}
@@ -102,9 +100,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {
proxy: cfg.proxy,
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
- fallbackTime: cfg.fallbackTime,
alphabetSource: cfg.alphabetSource,
- notary: notary.Hash,
}
c.notary = notaryCfg
@@ -138,7 +134,7 @@ func (c *Client) ProbeNotary() (res bool) {
// use this function.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uint256, err error) {
+func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -152,20 +148,18 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err)
+ return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err)
}
- currentTill, err := c.depositExpirationOf()
+ r := notary.NewReader(c.rpcActor)
+ currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash())
if err != nil {
- return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
+ return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err)
}
- till := int64(bc + delta)
- if till < currentTill {
- till = currentTill
- }
-
- return c.depositNotary(amount, till)
+ till := max(int64(bc+delta), int64(currentTill))
+ res, _, err := c.depositNotary(ctx, amount, till)
+ return res, err
}
// DepositEndlessNotary calls notary deposit method. Unlike `DepositNotary`,
@@ -173,12 +167,12 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin
// This allows to avoid ValidAfterDeposit failures.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, err error) {
+func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return util.Uint256{}, ErrConnectionLost
+ return util.Uint256{}, 0, ErrConnectionLost
}
if c.notary == nil {
@@ -186,37 +180,37 @@ func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, e
}
// till value refers to a block height and it is uint32 value in neo-go
- return c.depositNotary(amount, math.MaxUint32)
+ return c.depositNotary(ctx, amount, math.MaxUint32)
}
-func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint256, err error) {
+func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
- c.notary.notary,
+ notary.Hash,
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {
if !errors.Is(err, neorpc.ErrAlreadyExists) {
- return util.Uint256{}, fmt.Errorf("can't make notary deposit: %w", err)
+ return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err)
}
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Debug("notary deposit has already been made",
+ c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Error(err))
- return util.Uint256{}, nil
+ return util.Uint256{}, 0, nil
}
- c.logger.Debug("notary deposit invoke",
+ c.logger.Info(ctx, logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
- return txHash, nil
+ return txHash, vub, nil
}
// GetNotaryDeposit returns deposit of client's account in notary contract.
@@ -237,18 +231,10 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) {
sh := c.acc.PrivateKey().PublicKey().GetScriptHash()
- items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh)
+ r := notary.NewReader(c.rpcActor)
+ bigIntDeposit, err := r.BalanceOf(sh)
if err != nil {
- return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)
- }
-
- if len(items) != 1 {
- return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems))
- }
-
- bigIntDeposit, err := items[0].TryInteger()
- if err != nil {
- return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err))
+ return 0, fmt.Errorf("get notary deposit: %w", err)
}
return bigIntDeposit.Int64(), nil
@@ -275,7 +261,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) {
// committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
+func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -287,12 +273,13 @@ func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
panic(notaryNotEnabledPanicMsg)
}
- nonce, vub, err := c.CalculateNonceAndVUB(prm.hash)
+ nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -323,7 +310,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) {
// Requires committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
+func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -335,12 +322,13 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
panic(notaryNotEnabledPanicMsg)
}
- nonce, vub, err := c.CalculateNonceAndVUB(prm.hash)
+ nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
if err != nil {
- return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -353,20 +341,22 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
// blockchain. Fallback tx is a `RET`. If Notary support is not enabled
// it fallbacks to a simple `Invoke()`.
//
+// Returns the validUntilBlock value.
+//
// `nonce` and `vub` are used only if notary is enabled.
-func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(contract, fee, method, args...)
+ return c.Invoke(ctx, contract, fee, method, args...)
}
- return c.notaryInvoke(false, true, contract, nonce, vub, method, args...)
+ return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...)
}
// NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's
@@ -374,19 +364,19 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui
// not expected to be signed by the current node.
//
// Considered to be used by non-IR nodes.
-func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error {
+func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return ErrConnectionLost
+ return InvokeRes{}, ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(contract, fee, method, args...)
+ return c.Invoke(ctx, contract, fee, method, args...)
}
- return c.notaryInvoke(false, false, contract, rand.Uint32(), nil, method, args...)
+ return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...)
}
// NotarySignAndInvokeTX signs and sends notary request that was received from
@@ -403,295 +393,210 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return fmt.Errorf("could not fetch current alphabet keys: %w", err)
+ return fmt.Errorf("fetch current alphabet keys: %w", err)
}
- multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, false, true)
+ cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList)
if err != nil {
return err
}
- // error appears only if client
- // is in inactive mode; that has
- // been already checked above
- magicNumber, _ := c.MagicNumber()
+ nAct, err := notary.NewActor(c.client, cosigners, c.acc)
+ if err != nil {
+ return err
+ }
- // mainTX is expected to be pre-validated: second witness must exist and be empty
- mainTx.Scripts[1].VerificationScript = multiaddrAccount.GetVerificationScript()
- mainTx.Scripts[1].InvocationScript = append(
- []byte{byte(opcode.PUSHDATA1), 64},
- multiaddrAccount.PrivateKey().SignHashable(uint32(magicNumber), mainTx)...,
- )
+ // Sign exactly the same transaction we've got from the received Notary request.
+ err = nAct.Sign(mainTx)
+ if err != nil {
+ return fmt.Errorf("faield to sign notary request: %w", err)
+ }
+
+ mainH, fbH, untilActual, err := nAct.Notarize(mainTx, nil)
- resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx,
- []byte{byte(opcode.RET)},
- -1,
- 0,
- c.notary.fallbackTime,
- c.acc)
if err != nil && !alreadyOnChainError(err) {
return err
}
- c.logger.Debug("notary request with prepared main TX invoked",
- zap.Uint32("fallback_valid_for", c.notary.fallbackTime),
- zap.Stringer("tx_hash", resp.Hash().Reverse()))
+ c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked,
+ zap.String("tx_hash", mainH.StringLE()),
+ zap.Uint32("valid_until_block", untilActual),
+ zap.String("fallback_hash", fbH.StringLE()))
return nil
}
-func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error {
+func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error {
designate := c.GetDesignateHash()
- return c.notaryInvoke(true, true, designate, nonce, &vub, method, args...)
+ _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...)
+ return err
}
-func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) error {
- alphabetList, err := c.notary.alphabetSource() // prepare arguments for test invocation
+func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
+ start := time.Now()
+ success := false
+ defer func() {
+ c.metrics.ObserveInvoke("notaryInvoke", contract.String(), method, success, time.Since(start))
+ }()
+
+ alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return err
+ return InvokeRes{}, err
}
- u8n := uint8(len(alphabetList))
-
- if !invokedByAlpha {
- u8n++
+ until, err := c.getUntilValue(vub)
+ if err != nil {
+ return InvokeRes{}, err
}
cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee)
if err != nil {
- return err
+ return InvokeRes{}, err
}
- params, err := invocationParams(args...)
+ nAct, err := notary.NewActor(c.client, cosigners, c.acc)
if err != nil {
- return err
+ return InvokeRes{}, err
}
- // make test invocation of the method
- test, err := c.client.InvokeFunction(contract, method, params, cosigners)
- if err != nil {
- return err
- }
-
- // check invocation state
- if test.State != HaltState {
- return wrapFrostFSError(¬HaltStateError{state: test.State, exception: test.FaultException})
- }
-
- // if test invocation failed, then return error
- if len(test.Script) == 0 {
- return wrapFrostFSError(errEmptyInvocationScript)
- }
-
- // after test invocation we build main multisig transaction
-
- multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, committee, invokedByAlpha)
- if err != nil {
- return err
- }
-
- var until uint32
-
- if vub != nil {
- until = *vub
- } else {
- until, err = c.notaryTxValidationLimit()
- if err != nil {
- return err
+ mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error {
+ if r.State != vmstate.Halt.String() {
+ return ¬HaltStateError{state: r.State, exception: r.FaultException}
}
- }
- // prepare main tx
- mainTx := &transaction.Transaction{
- Nonce: nonce,
- SystemFee: test.GasConsumed,
- ValidUntilBlock: until,
- Script: test.Script,
- Attributes: []transaction.Attribute{
- {
- Type: transaction.NotaryAssistedT,
- Value: &transaction.NotaryAssisted{NKeys: u8n},
- },
- },
- Signers: cosigners,
- }
+ t.ValidUntilBlock = until
+ t.Nonce = nonce
- // calculate notary fee
- notaryFee, err := c.client.CalculateNotaryFee(u8n)
- if err != nil {
- return err
- }
+ return nil
+ }, args...))
- // add network fee for cosigners
- //nolint:staticcheck // waits for neo-go v0.99.3 with notary actors
- err = c.client.AddNetworkFee(
- mainTx,
- notaryFee,
- c.notaryAccounts(invokedByAlpha, multiaddrAccount)...,
- )
- if err != nil {
- return err
- }
-
- // define witnesses
- mainTx.Scripts = c.notaryWitnesses(invokedByAlpha, multiaddrAccount, mainTx)
-
- resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx,
- []byte{byte(opcode.RET)},
- -1,
- 0,
- c.notary.fallbackTime,
- c.acc)
if err != nil && !alreadyOnChainError(err) {
- return err
+ return InvokeRes{}, err
}
- c.logger.Debug("notary request invoked",
+ c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked,
zap.String("method", method),
- zap.Uint32("valid_until_block", until),
- zap.Uint32("fallback_valid_for", c.notary.fallbackTime),
- zap.Stringer("tx_hash", resp.Hash().Reverse()))
+ zap.Uint32("valid_until_block", untilActual),
+ zap.String("tx_hash", mainH.StringLE()),
+ zap.String("fallback_hash", fbH.StringLE()))
- return nil
+ success = true
+ return InvokeRes{Hash: mainH, VUB: until}, nil
}
-func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, committee bool) ([]transaction.Signer, error) {
- s := make([]transaction.Signer, 0, 4)
-
- // first we have proxy contract signature, as it will pay for the execution
- s = append(s, transaction.Signer{
- Account: c.notary.proxy,
- Scopes: transaction.None,
- })
-
- // then we have inner ring multiaddress signature
- m := sigCount(ir, committee)
-
- multisigScript, err := sc.CreateMultiSigRedeemScript(m, ir)
+func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) {
+ multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, false, true)
if err != nil {
- // wrap error as FrostFS-specific since the call is not related to any client
- return nil, wrapFrostFSError(fmt.Errorf("can't create ir multisig redeem script: %w", err))
+ return nil, err
}
- s = append(s, transaction.Signer{
- Account: hash.Hash160(multisigScript),
- Scopes: c.signer.Scopes,
- AllowedContracts: c.signer.AllowedContracts,
- AllowedGroups: c.signer.AllowedGroups,
- })
-
- if !invokedByAlpha {
- // then we have invoker signature
- s = append(s, transaction.Signer{
- Account: hash.Hash160(c.acc.GetVerificationScript()),
- Scopes: c.signer.Scopes,
- AllowedContracts: c.signer.AllowedContracts,
- AllowedGroups: c.signer.AllowedGroups,
+ // Here we need to add a committee signature (second witness) to the pre-validated
+ // main transaction without creating a new one. However, the Notary actor requires
+ // a proper signer set in its constructor, so fill it from the main transaction's signers list.
+ s := make([]actor.SignerAccount, 2, 3)
+ s[0] = actor.SignerAccount{
+ // Proxy contract that will pay for the execution.
+ Signer: mainTx.Signers[0],
+ Account: notary.FakeContractAccount(mainTx.Signers[0].Account),
+ }
+ s[1] = actor.SignerAccount{
+ // Inner ring multisignature.
+ Signer: mainTx.Signers[1],
+ Account: multiaddrAccount,
+ }
+ if len(mainTx.Signers) > 3 {
+ // Invoker signature (simple signature account of storage node is expected).
+ var acc *wallet.Account
+ script := mainTx.Scripts[2].VerificationScript
+ if len(script) == 0 {
+ acc = notary.FakeContractAccount(mainTx.Signers[2].Account)
+ } else {
+ pubBytes, ok := vm.ParseSignatureContract(script)
+ if ok {
+ pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err)
+ }
+ acc = notary.FakeSimpleAccount(pub)
+ } else {
+ m, pubsBytes, ok := vm.ParseMultiSigContract(script)
+ if !ok {
+ return nil, errors.New("parse verification script of signer #2: unknown witness type")
+ }
+ pubs := make(keys.PublicKeys, len(pubsBytes))
+ for i := range pubs {
+ pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err)
+ }
+ }
+ acc, err = notary.FakeMultisigAccount(m, pubs)
+ if err != nil {
+ return nil, fmt.Errorf("create fake account for signer #2: %w", err)
+ }
+ }
+ }
+ s = append(s, actor.SignerAccount{
+ Signer: mainTx.Signers[2],
+ Account: acc,
})
}
- // last one is a placeholder for notary contract signature
- s = append(s, transaction.Signer{
- Account: c.notary.notary,
- Scopes: transaction.None,
- })
-
return s, nil
}
-func (c *Client) notaryAccounts(invokedByAlpha bool, multiaddr *wallet.Account) []*wallet.Account {
- if multiaddr == nil {
- return nil
+func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, committee bool) ([]actor.SignerAccount, error) {
+ multiaddrAccount, err := c.notaryMultisigAccount(ir, committee, invokedByAlpha)
+ if err != nil {
+ return nil, err
+ }
+ s := make([]actor.SignerAccount, 2, 3)
+ // Proxy contract that will pay for the execution.
+ s[0] = actor.SignerAccount{
+ Signer: transaction.Signer{
+ Account: c.notary.proxy,
+ // Do not change this:
+ // We must be able to call NNS contract indirectly from the Container contract.
+ // Thus, CalledByEntry is not sufficient.
+ // In the future we may restrict the scope to the use cases we actually have.
+ Scopes: transaction.Global,
+ },
+ Account: notary.FakeContractAccount(c.notary.proxy),
+ }
+ // Inner ring multisignature.
+ s[1] = actor.SignerAccount{
+ Signer: transaction.Signer{
+ Account: multiaddrAccount.ScriptHash(),
+ Scopes: c.cfg.signer.Scopes,
+ AllowedContracts: c.cfg.signer.AllowedContracts,
+ AllowedGroups: c.cfg.signer.AllowedGroups,
+ },
+ Account: multiaddrAccount,
}
-
- a := make([]*wallet.Account, 0, 4)
-
- // first we have proxy account, as it will pay for the execution
- a = append(a, notary.FakeContractAccount(c.notary.proxy))
-
- // then we have inner ring multiaddress account
- a = append(a, multiaddr)
if !invokedByAlpha {
- // then we have invoker account
- a = append(a, c.acc)
- }
-
- // last one is a placeholder for notary contract account
- a = append(a, &wallet.Account{
- Contract: &wallet.Contract{},
- })
-
- return a
-}
-
-func (c *Client) notaryWitnesses(invokedByAlpha bool, multiaddr *wallet.Account, tx *transaction.Transaction) []transaction.Witness {
- if multiaddr == nil || tx == nil {
- return nil
- }
-
- w := make([]transaction.Witness, 0, 4)
-
- // first we have empty proxy witness, because notary will execute `Verify`
- // method on the proxy contract to check witness
- w = append(w, transaction.Witness{
- InvocationScript: []byte{},
- VerificationScript: []byte{},
- })
-
- // then we have inner ring multiaddress witness
-
- // invocation script should be of the form:
- // { PUSHDATA1, 64, signatureBytes... }
- // to pass Notary module verification
- var invokeScript []byte
-
- magicNumber, _ := c.MagicNumber()
-
- if invokedByAlpha {
- invokeScript = append(
- []byte{byte(opcode.PUSHDATA1), 64},
- multiaddr.PrivateKey().SignHashable(uint32(magicNumber), tx)...,
- )
- } else {
- // we can't provide alphabet node signature
- // because Storage Node doesn't own alphabet's
- // private key. Thus, add dummy witness with
- // empty bytes instead of signature
- invokeScript = append(
- []byte{byte(opcode.PUSHDATA1), 64},
- make([]byte, 64)...,
- )
- }
-
- w = append(w, transaction.Witness{
- InvocationScript: invokeScript,
- VerificationScript: multiaddr.GetVerificationScript(),
- })
-
- if !invokedByAlpha {
- // then we have invoker witness
- invokeScript = append(
- []byte{byte(opcode.PUSHDATA1), 64},
- c.acc.PrivateKey().SignHashable(uint32(magicNumber), tx)...,
- )
-
- w = append(w, transaction.Witness{
- InvocationScript: invokeScript,
- VerificationScript: c.acc.GetVerificationScript(),
+ // Invoker signature.
+ s = append(s, actor.SignerAccount{
+ Signer: transaction.Signer{
+ Account: hash.Hash160(c.acc.GetVerificationScript()),
+ Scopes: c.cfg.signer.Scopes,
+ AllowedContracts: c.cfg.signer.AllowedContracts,
+ AllowedGroups: c.cfg.signer.AllowedGroups,
+ },
+ Account: c.acc,
})
}
- // last one is a placeholder for notary contract witness
- w = append(w, transaction.Witness{
- InvocationScript: append(
- []byte{byte(opcode.PUSHDATA1), 64},
- make([]byte, 64)...,
- ),
- VerificationScript: []byte{},
- })
+ // The last signer is the Notary contract, which the Notary actor
+ // adds to the signers list automatically.
+ return s, nil
+}
- return w
+func (c *Client) getUntilValue(vub *uint32) (uint32, error) {
+ if vub != nil {
+ return *vub, nil
+ }
+ return c.notaryTxValidationLimit()
}
func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedByAlpha bool) (*wallet.Account, error) {
@@ -703,8 +608,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey())
err := multisigAccount.ConvertMultisig(m, ir)
if err != nil {
- // wrap error as FrostFS-specific since the call is not related to any client
- return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err))
+ return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err)
}
} else {
// alphabet multisig redeem script is
@@ -712,8 +616,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
// inner ring multiaddress witness
multisigAccount, err = notary.FakeMultisigAccount(m, ir)
if err != nil {
- // wrap error as FrostFS-specific since the call is not related to any client
- return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err))
+ return nil, fmt.Errorf("make inner ring multisig wallet: %w", err)
}
}
@@ -723,48 +626,15 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
func (c *Client) notaryTxValidationLimit() (uint32, error) {
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return 0, fmt.Errorf("can't get current blockchain height: %w", err)
+ return 0, fmt.Errorf("get current blockchain height: %w", err)
}
- min := bc + c.notary.txValidTime
- rounded := (min/c.notary.roundTime + 1) * c.notary.roundTime
+ minTime := bc + c.notary.txValidTime
+ rounded := (minTime/c.notary.roundTime + 1) * c.notary.roundTime
return rounded, nil
}
-func (c *Client) depositExpirationOf() (int64, error) {
- expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash())
- if err != nil {
- return 0, fmt.Errorf("can't invoke method: %w", err)
- }
-
- if len(expirationRes) != 1 {
- return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes))
- }
-
- currentTillBig, err := expirationRes[0].TryInteger()
- if err != nil {
- return 0, fmt.Errorf("can't parse deposit till value: %w", err)
- }
-
- return currentTillBig.Int64(), nil
-}
-
-func invocationParams(args ...any) ([]sc.Parameter, error) {
- params := make([]sc.Parameter, 0, len(args))
-
- for i := range args {
- param, err := toStackParameter(args[i])
- if err != nil {
- return nil, err
- }
-
- params = append(params, param)
- }
-
- return params, nil
-}
-
// sigCount returns the number of required signature.
// For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT).
// If committee is true, returns M as N/2+1.
@@ -792,15 +662,6 @@ func WithRoundTime(t uint32) NotaryOption {
}
}
-// WithFallbackTime returns a notary support option for client
-// that specifies amount of blocks before fallbackTx will be sent.
-// Should be less than TxValidTime.
-func WithFallbackTime(t uint32) NotaryOption {
- return func(c *notaryCfg) {
- c.fallbackTime = t
- }
-}
-
// WithAlphabetSource returns a notary support option for client
// that specifies function to return list of alphabet node keys.
// By default notary subsystem uses committee as a source. This is
@@ -818,13 +679,17 @@ func WithProxyContract(h util.Uint160) NotaryOption {
}
}
-// Neo RPC node can return `core.ErrInvalidAttribute` error with
+// Neo RPC node can return `neorpc.ErrInvalidAttribute` error with
// `conflicting transaction <> is already on chain` message. This
// error is expected and ignored. As soon as main tx persisted on
// chain everything is fine. This happens because notary contract
// requires 5 out of 7 signatures to send main tx, thus last two
// notary requests may be processed after main tx appeared on chain.
func alreadyOnChainError(err error) bool {
+ if !errors.Is(err, neorpc.ErrInvalidAttribute) {
+ return false
+ }
+
const alreadyOnChainErrorMessage = "already on chain"
return strings.Contains(err.Error(), alreadyOnChainErrorMessage)
@@ -843,12 +708,12 @@ func alreadyOnChainError(err error) bool {
func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) {
notaryBalance, err := c.GetNotaryDeposit()
if err != nil {
- return 0, fmt.Errorf("could not get notary balance: %w", err)
+ return 0, fmt.Errorf("get notary balance: %w", err)
}
gasBalance, err := c.GasBalance()
if err != nil {
- return 0, fmt.Errorf("could not get GAS balance: %w", err)
+ return 0, fmt.Errorf("get GAS balance: %w", err)
}
if gasBalance == 0 {
@@ -868,7 +733,19 @@ func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed
// CalculateNonceAndVUB calculates nonce and ValidUntilBlock values
// based on transaction hash.
-func (c *Client) CalculateNonceAndVUB(hash util.Uint256) (nonce uint32, vub uint32, err error) {
+func (c *Client) CalculateNonceAndVUB(hash *util.Uint256) (nonce uint32, vub uint32, err error) {
+ return c.calculateNonceAndVUB(hash, false)
+}
+
+// CalculateNonceAndVUBControl calculates nonce and rounded ValidUntilBlock values
+// based on transaction hash for use in control transactions.
+func (c *Client) CalculateNonceAndVUBControl(hash *util.Uint256) (nonce uint32, vub uint32, err error) {
+ return c.calculateNonceAndVUB(hash, true)
+}
+
+// If hash is specified, the transaction's height and hash are used to compute VUB and nonce.
+// Otherwise, the current block height is used.
+func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool) (nonce uint32, vub uint32, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -880,18 +757,43 @@ func (c *Client) CalculateNonceAndVUB(hash util.Uint256) (nonce uint32, vub uint
return 0, 0, nil
}
- nonce = binary.LittleEndian.Uint32(hash.BytesLE())
+ var height uint32
- height, err := c.getTransactionHeight(hash)
- if err != nil {
- return 0, 0, fmt.Errorf("could not get transaction height: %w", err)
+ if hash != nil {
+ height, err = c.getTransactionHeight(*hash)
+ if err != nil {
+ return 0, 0, fmt.Errorf("get transaction height: %w", err)
+ }
+ } else {
+ height, err = c.rpcActor.GetBlockCount()
+ if err != nil {
+ return 0, 0, fmt.Errorf("get chain height: %w", err)
+ }
}
- return nonce, height + c.notary.txValidTime, nil
+ // For control transactions, we round down the block height to control the
+ // probability of all nodes producing the same transaction, since it depends
+ // on this value.
+ if roundBlockHeight {
+ inc := c.rpcActor.GetVersion().Protocol.MaxValidUntilBlockIncrement
+ height = height / inc * inc
+ }
+
+ if hash != nil {
+ return binary.LittleEndian.Uint32(hash.BytesLE()), height + c.notary.txValidTime, nil
+ }
+ return height + c.notary.txValidTime, height + c.notary.txValidTime, nil
}
func (c *Client) getTransactionHeight(h util.Uint256) (uint32, error) {
+ success := false
+ startedAt := time.Now()
+ defer func() {
+ c.cache.metrics.AddMethodDuration("TxHeight", success, time.Since(startedAt))
+ }()
+
if rh, ok := c.cache.txHeights.Get(h); ok {
+ success = true
return rh, nil
}
height, err := c.client.GetTransactionHeight(h)
@@ -899,5 +801,6 @@ func (c *Client) getTransactionHeight(h util.Uint256) (uint32, error) {
return 0, err
}
c.cache.txHeights.Add(h, height)
+ success = true
return height, nil
}
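The control-transaction rounding in calculateNonceAndVUB is plain integer truncation. A self-contained sketch with illustrative numbers (the increment value is an assumption; in the real code it comes from GetVersion().Protocol.MaxValidUntilBlockIncrement):

package main

import "fmt"

// roundHeight truncates the chain height to a multiple of the protocol's
// MaxValidUntilBlockIncrement, so every node derives the same VUB while
// the height stays within one increment window.
func roundHeight(height, increment uint32) uint32 {
	return height / increment * increment
}

func main() {
	const txValidTime = 50                      // defaultNotaryValidTime
	height, inc := uint32(123456), uint32(5760) // inc value is illustrative
	fmt.Println("vub:", roundHeight(height, inc)+txValidTime)
}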
diff --git a/pkg/morph/client/notifications.go b/pkg/morph/client/notifications.go
index 2afeebb832..35204bb366 100644
--- a/pkg/morph/client/notifications.go
+++ b/pkg/morph/client/notifications.go
@@ -1,9 +1,11 @@
package client
import (
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/core/block"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/util"
- "go.uber.org/zap"
)
// Close closes connection to the remote side making
@@ -14,74 +16,52 @@ func (c *Client) Close() {
// closing should be done via the channel
// to prevent switching to another RPC node
// in the notification loop
- close(c.closeChan)
+ if c.closed.CompareAndSwap(false, true) {
+ close(c.closeChan)
+ }
+ c.wg.Wait()
}
-// SubscribeForExecutionNotifications adds subscription for notifications
-// generated during contract transaction execution to this instance of client.
+// ReceiveExecutionNotifications performs subscription for notifications
+// generated during contract execution. Events are sent to the specified channel.
//
// Returns ErrConnectionLost if client has not been able to establish
// connection to any of passed RPC endpoints.
-func (c *Client) SubscribeForExecutionNotifications(contract util.Uint160) error {
+func (c *Client) ReceiveExecutionNotifications(contract util.Uint160, ch chan<- *state.ContainedNotificationEvent) (string, error) {
c.switchLock.Lock()
defer c.switchLock.Unlock()
if c.inactive {
- return ErrConnectionLost
+ return "", ErrConnectionLost
}
- _, subscribed := c.subscribedEvents[contract]
- if subscribed {
- // no need to subscribe one more time
- return nil
- }
-
- id, err := c.client.SubscribeForExecutionNotifications(&contract, nil)
- if err != nil {
- return err
- }
-
- c.subscribedEvents[contract] = id
-
- return nil
+ return c.client.ReceiveExecutionNotifications(&neorpc.NotificationFilter{Contract: &contract}, ch)
}
-// SubscribeForNewBlocks adds subscription for new block events to this
-// instance of client.
+// ReceiveBlocks performs subscription for new block events. Events are sent
+// to the specified channel.
//
// Returns ErrConnectionLost if client has not been able to establish
// connection to any of passed RPC endpoints.
-func (c *Client) SubscribeForNewBlocks() error {
+func (c *Client) ReceiveBlocks(ch chan<- *block.Block) (string, error) {
c.switchLock.Lock()
defer c.switchLock.Unlock()
if c.inactive {
- return ErrConnectionLost
+ return "", ErrConnectionLost
}
- if c.subscribedToNewBlocks {
- // no need to subscribe one more time
- return nil
- }
-
- _, err := c.client.SubscribeForNewBlocks(nil)
- if err != nil {
- return err
- }
-
- c.subscribedToNewBlocks = true
-
- return nil
+ return c.client.ReceiveBlocks(nil, ch)
}
-// SubscribeForNotaryRequests adds subscription for notary request payloads
+// ReceiveNotaryRequests performs subscription for notary request payloads
// addition or removal events to this instance of client. Passed txSigner is
// used as filter: subscription is only for the notary requests that must be
-// signed by txSigner.
+// signed by txSigner. Events are sent to the specified channel.
//
// Returns ErrConnectionLost if client has not been able to establish
// connection to any of passed RPC endpoints.
-func (c *Client) SubscribeForNotaryRequests(txSigner util.Uint160) error {
+func (c *Client) ReceiveNotaryRequests(txSigner util.Uint160, ch chan<- *result.NotaryRequestEvent) (string, error) {
if c.notary == nil {
panic(notaryNotEnabledPanicMsg)
}
@@ -90,30 +70,17 @@ func (c *Client) SubscribeForNotaryRequests(txSigner util.Uint160) error {
defer c.switchLock.Unlock()
if c.inactive {
- return ErrConnectionLost
+ return "", ErrConnectionLost
}
- _, subscribed := c.subscribedNotaryEvents[txSigner]
- if subscribed {
- // no need to subscribe one more time
- return nil
- }
-
- id, err := c.client.SubscribeForNotaryRequests(nil, &txSigner)
- if err != nil {
- return err
- }
-
- c.subscribedNotaryEvents[txSigner] = id
-
- return nil
+ return c.client.ReceiveNotaryRequests(&neorpc.NotaryRequestFilter{Signer: &txSigner}, ch)
}
-// UnsubscribeContract removes subscription for given contract event stream.
+// Unsubscribe cancels the subscription with the given ID.
//
// Returns ErrConnectionLost if client has not been able to establish
// connection to any of passed RPC endpoints.
-func (c *Client) UnsubscribeContract(contract util.Uint160) error {
+func (c *Client) Unsubscribe(subID string) error {
c.switchLock.Lock()
defer c.switchLock.Unlock()
@@ -121,55 +88,7 @@ func (c *Client) UnsubscribeContract(contract util.Uint160) error {
return ErrConnectionLost
}
- _, subscribed := c.subscribedEvents[contract]
- if !subscribed {
- // no need to unsubscribe contract
- // without subscription
- return nil
- }
-
- err := c.client.Unsubscribe(c.subscribedEvents[contract])
- if err != nil {
- return err
- }
-
- delete(c.subscribedEvents, contract)
-
- return nil
-}
-
-// UnsubscribeNotaryRequest removes subscription for given notary requests
-// signer.
-//
-// Returns ErrConnectionLost if client has not been able to establish
-// connection to any of passed RPC endpoints.
-func (c *Client) UnsubscribeNotaryRequest(signer util.Uint160) error {
- if c.notary == nil {
- panic(notaryNotEnabledPanicMsg)
- }
-
- c.switchLock.Lock()
- defer c.switchLock.Unlock()
-
- if c.inactive {
- return ErrConnectionLost
- }
-
- _, subscribed := c.subscribedNotaryEvents[signer]
- if !subscribed {
- // no need to unsubscribe signer's
- // requests without subscription
- return nil
- }
-
- err := c.client.Unsubscribe(c.subscribedNotaryEvents[signer])
- if err != nil {
- return err
- }
-
- delete(c.subscribedNotaryEvents, signer)
-
- return nil
+ return c.client.Unsubscribe(subID)
}
// UnsubscribeAll removes all active subscriptions of current client.
@@ -184,102 +103,6 @@ func (c *Client) UnsubscribeAll() error {
return ErrConnectionLost
}
- // no need to unsubscribe if there are
- // no active subscriptions
- if len(c.subscribedEvents) == 0 && len(c.subscribedNotaryEvents) == 0 &&
- !c.subscribedToNewBlocks {
- return nil
- }
-
err := c.client.UnsubscribeAll()
- if err != nil {
- return err
- }
-
- c.subscribedEvents = make(map[util.Uint160]string)
- c.subscribedNotaryEvents = make(map[util.Uint160]string)
- c.subscribedToNewBlocks = false
-
- return nil
-}
-
-// restoreSubscriptions restores subscriptions according to
-// cached information about them.
-func (c *Client) restoreSubscriptions(cli *rpcclient.WSClient, endpoint string) bool {
- var (
- err error
- id string
- )
-
- stopCh := make(chan struct{})
- defer close(stopCh)
-
- // neo-go WS client says to _always_ read notifications
- // from its channel. Subscribing to any notification
- // while not reading them in another goroutine may
- // lead to a dead-lock, thus that async side notification
- // listening while restoring subscriptions
- go func() {
- for {
- select {
- case <-stopCh:
- return
- case n, ok := <-cli.Notifications:
- if !ok {
- return
- }
-
- c.notifications <- n
- }
- }
- }()
-
- // new block events restoration
- if c.subscribedToNewBlocks {
- _, err = cli.SubscribeForNewBlocks(nil)
- if err != nil {
- c.logger.Error("could not restore block subscription after RPC switch",
- zap.String("endpoint", endpoint),
- zap.Error(err),
- )
-
- return false
- }
- }
-
- // notification events restoration
- for contract := range c.subscribedEvents {
- contract := contract // See https://github.com/nspcc-dev/neo-go/issues/2890
- id, err = cli.SubscribeForExecutionNotifications(&contract, nil)
- if err != nil {
- c.logger.Error("could not restore notification subscription after RPC switch",
- zap.String("endpoint", endpoint),
- zap.Error(err),
- )
-
- return false
- }
-
- c.subscribedEvents[contract] = id
- }
-
- // notary notification events restoration
- if c.notary != nil {
- for signer := range c.subscribedNotaryEvents {
- signer := signer // See https://github.com/nspcc-dev/neo-go/issues/2890
- id, err = cli.SubscribeForNotaryRequests(nil, &signer)
- if err != nil {
- c.logger.Error("could not restore notary notification subscription after RPC switch",
- zap.String("endpoint", endpoint),
- zap.Error(err),
- )
-
- return false
- }
-
- c.subscribedNotaryEvents[signer] = id
- }
- }
-
- return true
+ return err
}
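The subscription API is now channel-based: the caller owns the channel and unsubscribes by ID rather than by contract or signer. A hedged consumer sketch; the small interface stands in for *client.Client:

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/core/block"
)

type blockSource interface {
	ReceiveBlocks(ch chan<- *block.Block) (string, error)
	Unsubscribe(subID string) error
}

// consumeBlocks subscribes for new blocks and releases the subscription by
// the returned ID; the buffer keeps the WS reader from blocking.
func consumeBlocks(src blockSource) error {
	ch := make(chan *block.Block, 8)
	subID, err := src.ReceiveBlocks(ch)
	if err != nil {
		return err
	}
	defer func() { _ = src.Unsubscribe(subID) }()

	for b := range ch { // closed by the client on shutdown/switch
		fmt.Println("new block:", b.Index)
	}
	return nil
}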
diff --git a/pkg/morph/client/reputation/client.go b/pkg/morph/client/reputation/client.go
deleted file mode 100644
index cdaf191ad8..0000000000
--- a/pkg/morph/client/reputation/client.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// Client is a wrapper over StaticClient
-// which makes calls with the names and arguments
-// of the FrostFS reputation contract.
-//
-// Working client must be created via constructor New.
-// Using the Client that has been created with new(Client)
-// expression (or just declaring a Client variable) is unsafe
-// and can lead to panic.
-type Client struct {
- client *client.StaticClient // static reputation contract client
-}
-
-const (
- putMethod = "put"
- getMethod = "get"
- getByIDMethod = "getByID"
- listByEpochMethod = "listByEpoch"
-)
-
-// NewFromMorph returns the wrapper instance from the raw morph client.
-func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) {
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
- if err != nil {
- return nil, fmt.Errorf("could not create static client of reputation contract: %w", err)
- }
-
- return &Client{client: sc}, nil
-}
-
-// Morph returns raw morph client.
-func (c Client) Morph() *client.Client {
- return c.client.Morph()
-}
-
-// ContractAddress returns the address of the associated contract.
-func (c Client) ContractAddress() util.Uint160 {
- return c.client.ContractAddress()
-}
-
-// Option allows to set an optional
-// parameter of ClientWrapper.
-type Option func(*opts)
-
-type opts []client.StaticClientOption
-
-func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
-}
-
-// AsAlphabet returns option to sign main TX
-// of notary requests with client's private
-// key.
-//
-// Considered to be used by IR nodes only.
-func AsAlphabet() Option {
- return func(o *opts) {
- *o = append(*o, client.AsAlphabet())
- }
-}
diff --git a/pkg/morph/client/reputation/get.go b/pkg/morph/client/reputation/get.go
deleted file mode 100644
index 8f1d24176a..0000000000
--- a/pkg/morph/client/reputation/get.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type (
- // GetPrm groups the arguments of "get reputation value" test invocation.
- GetPrm struct {
- epoch uint64
- peerID reputation.PeerID
- }
-
- // GetByIDPrm groups the arguments of "get reputation value by
- // reputation id" test invocation.
- GetByIDPrm struct {
- id ID
- }
-)
-
-// SetEpoch sets epoch of expected reputation value.
-func (g *GetPrm) SetEpoch(v uint64) {
- g.epoch = v
-}
-
-// SetPeerID sets peer id of expected reputation value.
-func (g *GetPrm) SetPeerID(v reputation.PeerID) {
- g.peerID = v
-}
-
-// SetID sets id of expected reputation value in reputation contract.
-func (g *GetByIDPrm) SetID(v ID) {
- g.id = v
-}
-
-// Get invokes the call of "get reputation value" method of reputation contract.
-func (c *Client) Get(p GetPrm) ([]reputation.GlobalTrust, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(getMethod)
- invokePrm.SetArgs(p.epoch, p.peerID.PublicKey())
-
- res, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
- }
-
- return parseReputations(res, getMethod)
-}
-
-// GetByID invokes the call of "get reputation value by reputation id" method
-// of reputation contract.
-func (c *Client) GetByID(p GetByIDPrm) ([]reputation.GlobalTrust, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(getByIDMethod)
- invokePrm.SetArgs([]byte(p.id))
-
- prms, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getByIDMethod, err)
- }
-
- return parseReputations(prms, getByIDMethod)
-}
-
-func parseGetResult(rawReputations [][]byte, method string) ([]reputation.GlobalTrust, error) {
- reputations := make([]reputation.GlobalTrust, 0, len(rawReputations))
-
- for i := range rawReputations {
- r := reputation.GlobalTrust{}
-
- err := r.Unmarshal(rawReputations[i])
- if err != nil {
- return nil, fmt.Errorf("can't unmarshal global trust value (%s): %w", method, err)
- }
-
- reputations = append(reputations, r)
- }
-
- return reputations, nil
-}
-
-func parseReputations(items []stackitem.Item, method string) ([]reputation.GlobalTrust, error) {
- if ln := len(items); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", method, ln)
- }
-
- items, err := client.ArrayFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
- }
-
- res := make([][]byte, 0, len(items))
-
- for i := range items {
- rawReputation, err := client.BytesFromStackItem(items[i])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", method, err)
- }
-
- res = append(res, rawReputation)
- }
-
- return parseGetResult(res, method)
-}
diff --git a/pkg/morph/client/reputation/list.go b/pkg/morph/client/reputation/list.go
deleted file mode 100644
index 0090efb10b..0000000000
--- a/pkg/morph/client/reputation/list.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-type (
- // ID is an ID of the reputation record in reputation contract.
- ID []byte
-
- // ListByEpochArgs groups the arguments of
- // "list reputation ids by epoch" test invoke call.
- ListByEpochArgs struct {
- epoch uint64
- }
-)
-
-// SetEpoch sets epoch of expected reputation ids.
-func (l *ListByEpochArgs) SetEpoch(v uint64) {
- l.epoch = v
-}
-
-// ListByEpoch invokes the call of "list reputation ids by epoch" method of
-// reputation contract.
-func (c *Client) ListByEpoch(p ListByEpochArgs) ([]ID, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(listByEpochMethod)
- invokePrm.SetArgs(p.epoch)
-
- prms, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByEpochMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", listByEpochMethod, ln)
- }
-
- items, err := client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listByEpochMethod, err)
- }
-
- result := make([]ID, 0, len(items))
- for i := range items {
- rawReputation, err := client.BytesFromStackItem(items[i])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listByEpochMethod, err)
- }
-
- result = append(result, rawReputation)
- }
-
- return result, nil
-}
diff --git a/pkg/morph/client/reputation/put.go b/pkg/morph/client/reputation/put.go
deleted file mode 100644
index 02b47defe8..0000000000
--- a/pkg/morph/client/reputation/put.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-type (
- // PutPrm groups the arguments of "put reputation value" invocation call.
- PutPrm struct {
- epoch uint64
- peerID reputation.PeerID
- value reputation.GlobalTrust
- }
-)
-
-// SetEpoch sets epoch of reputation value.
-func (p *PutPrm) SetEpoch(v uint64) {
- p.epoch = v
-}
-
-// SetPeerID sets peer id of reputation value.
-func (p *PutPrm) SetPeerID(v reputation.PeerID) {
- p.peerID = v
-}
-
-// SetValue sets reputation value.
-func (p *PutPrm) SetValue(v reputation.GlobalTrust) {
- p.value = v
-}
-
-// Put invokes direct call of "put reputation value" method of reputation contract.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Put(p PutPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(putMethod)
- prm.SetArgs(p.epoch, p.peerID.PublicKey(), p.value.Marshal())
-
- err := c.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", putMethod, err)
- }
- return nil
-}
diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go
index afaf49f3d4..c4eb120d26 100644
--- a/pkg/morph/client/static.go
+++ b/pkg/morph/client/static.go
@@ -1,8 +1,10 @@
package client
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -27,7 +29,7 @@ type staticOpts struct {
tryNotary bool
alpha bool // use client's key to sign notary request's main TX
- fees fees
+ fee fixedn.Fixed8
}
// WithNotary returns notary status of the client.
@@ -63,7 +65,7 @@ func NewStatic(client *Client, scriptHash util.Uint160, fee fixedn.Fixed8, opts
scScriptHash: scriptHash,
}
- c.fees.defaultFee = fee
+ c.fee = fee
for i := range opts {
opts[i](&c.staticOpts)
@@ -94,6 +96,14 @@ type InvokePrmOptional struct {
// `validUntilBlock` values by all notification
// receivers.
hash *util.Uint256
+ // controlTX controls whether the invoke method will use a rounded
+ // block height value, which is useful for control transactions that
+ // must be produced by all nodes with very high probability.
+ // It is used only by notary transactions and affects only the
+ // computation of `validUntilBlock` values.
+ controlTX bool
+ // vub is used to set custom valid until block value.
+ vub uint32
}
// SetHash sets optional hash of the transaction.
@@ -104,6 +114,26 @@ func (i *InvokePrmOptional) SetHash(hash util.Uint256) {
i.hash = &hash
}
+// SetControlTX sets whether a control transaction will be used.
+func (i *InvokePrmOptional) SetControlTX(b bool) {
+ i.controlTX = b
+}
+
+// IsControl reports whether a control transaction will be used.
+func (i *InvokePrmOptional) IsControl() bool {
+ return i.controlTX
+}
+
+// SetVUB sets the custom validUntilBlock value.
+func (i *InvokePrmOptional) SetVUB(v uint32) {
+ i.vub = v
+}
+
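+// InvokeRes groups the resulting values of an Invoke operation: the hash of
+// the sent transaction and its validUntilBlock value.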
+type InvokeRes struct {
+ Hash util.Uint256
+ VUB uint32
+}
+
// Invoke calls Invoke method of Client with static internal script hash and fee.
// Supported args types are the same as in Client.
//
@@ -113,36 +143,47 @@ func (i *InvokePrmOptional) SetHash(hash util.Uint256) {
//
// If fee for the operation executed using specified method is customized, then StaticClient uses it.
// Otherwise, default fee is used.
-func (s StaticClient) Invoke(prm InvokePrm) error {
- fee := s.fees.feeForMethod(prm.method)
-
+func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) {
+ var vubP *uint32
if s.tryNotary {
if s.alpha {
var (
nonce uint32 = 1
- vubP *uint32
vub uint32
err error
)
if prm.hash != nil {
- nonce, vub, err = s.client.CalculateNonceAndVUB(*prm.hash)
+ if prm.controlTX {
+ nonce, vub, err = s.client.CalculateNonceAndVUBControl(prm.hash)
+ } else {
+ nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash)
+ }
if err != nil {
- return fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err)
+ return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err)
}
vubP = &vub
}
- return s.client.NotaryInvoke(s.scScriptHash, fee, nonce, vubP, prm.method, prm.args...)
+ if prm.vub > 0 {
+ vubP = &prm.vub
+ }
+
+ return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
}
- return s.client.NotaryInvokeNotAlpha(s.scScriptHash, fee, prm.method, prm.args...)
+ if prm.vub > 0 {
+ vubP = &prm.vub
+ }
+
+ return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
}
return s.client.Invoke(
+ ctx,
s.scScriptHash,
- fee,
+ s.fee,
prm.method,
prm.args...,
)
@@ -165,7 +206,9 @@ func (ti *TestInvokePrm) SetArgs(args ...any) {
}
// TestInvoke calls TestInvoke method of Client with static internal script hash.
-func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) {
+func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
+ defer span.End()
return s.client.TestInvoke(
s.scScriptHash,
prm.method,
@@ -196,11 +239,3 @@ func AsAlphabet() StaticClientOption {
o.alpha = true
}
}
-
-// WithCustomFee returns option to specify custom fee for the operation executed using
-// specified contract method.
-func WithCustomFee(method string, fee fixedn.Fixed8) StaticClientOption {
- return func(o *staticOpts) {
- o.fees.setFeeForMethod(method, fee)
- }
-}
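
The `StaticClient.Invoke` rework above threads a `context.Context` through the call and surfaces the transaction hash and `validUntilBlock` height via `InvokeRes`. A minimal caller-side sketch under these assumptions — the helper name and the hard-coded VUB are illustrative, not part of this diff:

```go
package example

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

// invokeWithVUB is a hypothetical helper showing the reworked API surface:
// Invoke now takes a context and reports the hash/VUB of the sent transaction.
func invokeWithVUB(ctx context.Context, sc *client.StaticClient, method string, args ...any) error {
	var prm client.InvokePrm
	prm.SetMethod(method)
	prm.SetArgs(args...)
	prm.SetVUB(100) // assumption: a caller-chosen validUntilBlock override

	res, err := sc.Invoke(ctx, prm)
	if err != nil {
		return fmt.Errorf("invoke %s: %w", method, err)
	}
	fmt.Printf("tx %s is valid until block %d\n", res.Hash.StringLE(), res.VUB)
	return nil
}
```
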
diff --git a/pkg/morph/client/subnet/admin.go b/pkg/morph/client/subnet/admin.go
deleted file mode 100644
index 387da656d8..0000000000
--- a/pkg/morph/client/subnet/admin.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package morphsubnet
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-
-// ManageAdminsPrm groups parameters of administer methods of Subnet contract.
-//
-// Zero value adds node admin. Subnet, key and group must be specified via setters.
-type ManageAdminsPrm struct {
- // remove or add admin
- rm bool
-
- // client or node admin
- client bool
-
- subnet []byte
-
- admin []byte
-
- group []byte
-}
-
-// SetRemove marks admin to be removed. By default, admin is added.
-func (x *ManageAdminsPrm) SetRemove() {
- x.rm = true
-}
-
-// SetClient switches to client admin. By default, node admin is modified.
-func (x *ManageAdminsPrm) SetClient() {
- x.client = true
-}
-
-// SetSubnet sets identifier of the subnet in a binary FrostFS API protocol format.
-func (x *ManageAdminsPrm) SetSubnet(id []byte) {
- x.subnet = id
-}
-
-// SetAdmin sets admin's public key in a binary format.
-func (x *ManageAdminsPrm) SetAdmin(key []byte) {
- x.admin = key
-}
-
-// SetGroup sets identifier of the client group in a binary FrostFS API protocol format.
-// Makes sense only for client admins (see ManageAdminsPrm.SetClient).
-func (x *ManageAdminsPrm) SetGroup(id []byte) {
- x.group = id
-}
-
-// ManageAdminsRes groups the resulting values of node administer methods of Subnet contract.
-type ManageAdminsRes struct{}
-
-// ManageAdmins manages admin list of the FrostFS subnet through Subnet contract calls.
-func (x Client) ManageAdmins(prm ManageAdminsPrm) (*ManageAdminsPrm, error) {
- var method string
-
- args := make([]any, 1, 3)
- args[0] = prm.subnet
-
- if prm.client {
- args = append(args, prm.group, prm.admin)
-
- if prm.rm {
- method = removeClientAdminMethod
- } else {
- method = addClientAdminMethod
- }
- } else {
- args = append(args, prm.admin)
-
- if prm.rm {
- method = removeNodeAdminMethod
- } else {
- method = addNodeAdminMethod
- }
- }
-
- var prmInvoke client.InvokePrm
-
- prmInvoke.SetMethod(method)
- prmInvoke.SetArgs(args...)
-
- err := x.client.Invoke(prmInvoke)
- if err != nil {
- return nil, err
- }
-
- return new(ManageAdminsPrm), nil
-}
diff --git a/pkg/morph/client/subnet/client.go b/pkg/morph/client/subnet/client.go
deleted file mode 100644
index 8cbae8f954..0000000000
--- a/pkg/morph/client/subnet/client.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package morphsubnet
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// Client represents Subnet contract client.
-//
-// Client should be preliminary initialized (see Init method).
-type Client struct {
- client *client.StaticClient
-}
-
-// InitPrm groups parameters of Client's initialization.
-type InitPrm struct {
- base *client.Client
-
- addr util.Uint160
-
- modeSet bool
- mode Mode
-}
-
-const (
- deleteMethod = "delete"
- getMethod = "get"
- putMethod = "put"
-
- removeClientAdminMethod = "removeClientAdmin"
- addClientAdminMethod = "addClientAdmin"
-
- userAllowedMethod = "userAllowed"
- removeUserMethod = "removeUser"
- addUserMethod = "addUser"
-
- removeNodeAdminMethod = "removeNodeAdmin"
- addNodeAdminMethod = "addNodeAdmin"
- nodeAllowedMethod = "nodeAllowed"
- removeNodeMethod = "removeNode"
- addNodeMethod = "addNode"
-)
-
-// SetBaseClient sets basic morph client.
-func (x *InitPrm) SetBaseClient(base *client.Client) {
- x.base = base
-}
-
-// SetContractAddress sets address of Subnet contract in FrostFS sidechain.
-func (x *InitPrm) SetContractAddress(addr util.Uint160) {
- x.addr = addr
-}
-
-// Mode regulates client work mode.
-type Mode uint8
-
-const (
- _ Mode = iota
-
- // NonNotary makes client to work in non-notary environment.
- NonNotary
-
- // NotaryAlphabet makes client to use its internal key for signing the notary requests.
- NotaryAlphabet
-
- // NotaryNonAlphabet makes client to not use its internal key for signing the notary requests.
- NotaryNonAlphabet
-)
-
-// SetMode makes client to work with non-notary sidechain.
-// By default, NonNotary is used.
-func (x *InitPrm) SetMode(mode Mode) {
- x.modeSet = true
- x.mode = mode
-}
-
-// Init initializes client with specified parameters.
-//
-// Base client must be set.
-func (x *Client) Init(prm InitPrm) error {
- if prm.base == nil {
- panic("missing base morph client")
- }
-
- if !prm.modeSet {
- prm.mode = NonNotary
- }
-
- var opts []client.StaticClientOption
-
- switch prm.mode {
- default:
- panic(fmt.Sprintf("invalid work mode %d", prm.mode))
- case NonNotary:
- case NotaryNonAlphabet:
- opts = []client.StaticClientOption{client.TryNotary()}
- case NotaryAlphabet:
- opts = []client.StaticClientOption{client.TryNotary(), client.AsAlphabet()}
- }
-
- var err error
-
- x.client, err = client.NewStatic(prm.base, prm.addr, 0, opts...)
-
- return err
-}
diff --git a/pkg/morph/client/subnet/clients.go b/pkg/morph/client/subnet/clients.go
deleted file mode 100644
index 1c855496e7..0000000000
--- a/pkg/morph/client/subnet/clients.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package morphsubnet
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-// UserAllowedPrm groups parameters of UserAllowed method of Subnet contract.
-type UserAllowedPrm struct {
- args [2]any
-}
-
-// SetID sets identifier of the subnet in a binary FrostFS API protocol format.
-func (x *UserAllowedPrm) SetID(id []byte) {
- x.args[0] = id
-}
-
-// SetClient sets owner ID of the client that is being checked in a binary FrostFS API protocol format.
-func (x *UserAllowedPrm) SetClient(id []byte) {
- x.args[1] = id
-}
-
-// UserAllowedRes groups the resulting values of UserAllowed method of Subnet contract.
-type UserAllowedRes struct {
- result bool
-}
-
-// Allowed returns true iff the client is allowed to create containers in the subnet.
-func (x UserAllowedRes) Allowed() bool {
- return x.result
-}
-
-// UserAllowed checks if the user has access to the subnetwork.
-func (x *Client) UserAllowed(prm UserAllowedPrm) (*UserAllowedRes, error) {
- args := client.TestInvokePrm{}
-
- args.SetMethod(userAllowedMethod)
- args.SetArgs(prm.args[:]...)
-
- res, err := x.client.TestInvoke(args)
- if err != nil {
- return nil, fmt.Errorf("could not make test invoke: %w", err)
- }
-
- if len(res) == 0 {
- return nil, errEmptyResponse
- }
-
- result, err := client.BoolFromStackItem(res[0])
- if err != nil {
- return nil, err
- }
-
- return &UserAllowedRes{
- result: result,
- }, nil
-}
-
-// ManageClientsPrm groups parameters of client management in Subnet contract.
-//
-// Zero value adds subnet client. Subnet, group and client ID must be specified via setters.
-type ManageClientsPrm struct {
- // remove or add client
- rm bool
-
- args [3]any
-}
-
-// SetRemove marks client to be removed. By default, client is added.
-func (x *ManageClientsPrm) SetRemove() {
- x.rm = true
-}
-
-// SetSubnet sets identifier of the subnet in a binary FrostFS API protocol format.
-func (x *ManageClientsPrm) SetSubnet(id []byte) {
- x.args[0] = id
-}
-
-// SetGroup sets identifier of the client group in a binary FrostFS API protocol format.
-func (x *ManageClientsPrm) SetGroup(id []byte) {
- x.args[1] = id
-}
-
-// SetClient sets client's user ID in a binary FrostFS API protocol format.
-func (x *ManageClientsPrm) SetClient(id []byte) {
- x.args[2] = id
-}
-
-// ManageClientsRes groups the resulting values of client management methods of Subnet contract.
-type ManageClientsRes struct{}
-
-// ManageClients manages client list of the FrostFS subnet through Subnet contract calls.
-func (x Client) ManageClients(prm ManageClientsPrm) (*ManageClientsRes, error) {
- var method string
-
- if prm.rm {
- method = removeUserMethod
- } else {
- method = addUserMethod
- }
-
- var prmInvoke client.InvokePrm
-
- prmInvoke.SetMethod(method)
- prmInvoke.SetArgs(prm.args[:]...)
-
- err := x.client.Invoke(prmInvoke)
- if err != nil {
- return nil, err
- }
-
- return new(ManageClientsRes), nil
-}
diff --git a/pkg/morph/client/subnet/delete.go b/pkg/morph/client/subnet/delete.go
deleted file mode 100644
index f7f8bb2c97..0000000000
--- a/pkg/morph/client/subnet/delete.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package morphsubnet
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// DeletePrm groups parameters of Delete method of Subnet contract.
-type DeletePrm struct {
- cliPrm client.InvokePrm
-
- args [1]any
-}
-
-// SetTxHash sets hash of the transaction which spawned the notification.
-// Ignore this parameter for new requests.
-func (x *DeletePrm) SetTxHash(hash util.Uint256) {
- x.cliPrm.SetHash(hash)
-}
-
-// SetID sets identifier of the subnet to be removed in a binary FrostFS API protocol format.
-func (x *DeletePrm) SetID(id []byte) {
- x.args[0] = id
-}
-
-// DeleteRes groups the resulting values of Delete method of Subnet contract.
-type DeleteRes struct{}
-
-// Delete removes subnet though the call of the corresponding method of the Subnet contract.
-func (x Client) Delete(prm DeletePrm) (*DeleteRes, error) {
- prm.cliPrm.SetMethod(deleteMethod)
- prm.cliPrm.SetArgs(prm.args[:]...)
-
- err := x.client.Invoke(prm.cliPrm)
- if err != nil {
- return nil, err
- }
-
- return new(DeleteRes), nil
-}
diff --git a/pkg/morph/client/subnet/get.go b/pkg/morph/client/subnet/get.go
deleted file mode 100644
index 5cd7c39a0c..0000000000
--- a/pkg/morph/client/subnet/get.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package morphsubnet
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-// GetPrm groups parameters of Get method of Subnet contract.
-type GetPrm struct {
- args [1]any
-}
-
-// SetID sets identifier of the subnet to be read in a binary FrostFS API protocol format.
-func (x *GetPrm) SetID(id []byte) {
- x.args[0] = id
-}
-
-// GetRes groups the resulting values of Get method of Subnet contract.
-type GetRes struct {
- info []byte
-}
-
-// Info returns information about the subnet in a binary format of FrostFS API protocol.
-func (x GetRes) Info() []byte {
- return x.info
-}
-
-var errEmptyResponse = errors.New("empty response")
-
-// Get reads the subnet through the call of the corresponding method of the Subnet contract.
-func (x *Client) Get(prm GetPrm) (*GetRes, error) {
- var prmGet client.TestInvokePrm
-
- prmGet.SetMethod(getMethod)
- prmGet.SetArgs(prm.args[:]...)
-
- res, err := x.client.TestInvoke(prmGet)
- if err != nil {
- return nil, err
- }
-
- if len(res) == 0 {
- return nil, errEmptyResponse
- }
-
- data, err := client.BytesFromStackItem(res[0])
- if err != nil {
- return nil, err
- }
-
- return &GetRes{
- info: data,
- }, nil
-}
diff --git a/pkg/morph/client/subnet/node.go b/pkg/morph/client/subnet/node.go
deleted file mode 100644
index 134b92943f..0000000000
--- a/pkg/morph/client/subnet/node.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package morphsubnet
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-// NodeAllowedPrm groups parameters of NodeAllowed method of Subnet contract.
-type NodeAllowedPrm struct {
- cliPrm client.TestInvokePrm
-
- args [2]any
-}
-
-// SetID sets identifier of the subnet of the node in a binary FrostFS API protocol format.
-func (x *NodeAllowedPrm) SetID(id []byte) {
- x.args[0] = id
-}
-
-// SetNode sets public key of the node that is being checked.
-func (x *NodeAllowedPrm) SetNode(id []byte) {
- x.args[1] = id
-}
-
-// NodeAllowedRes groups the resulting values of NodeAllowed method of Subnet contract.
-type NodeAllowedRes struct {
- result bool
-}
-
-// Allowed returns true iff the node is allowed to enter the subnet.
-func (x NodeAllowedRes) Allowed() bool {
- return x.result
-}
-
-// NodeAllowed checks if the node is included in the subnetwork.
-func (x *Client) NodeAllowed(prm NodeAllowedPrm) (*NodeAllowedRes, error) {
- prm.cliPrm.SetMethod(nodeAllowedMethod)
- prm.cliPrm.SetArgs(prm.args[:]...)
-
- res, err := x.client.TestInvoke(prm.cliPrm)
- if err != nil {
- return nil, fmt.Errorf("could not make test invoke: %w", err)
- }
-
- if len(res) == 0 {
- return nil, errEmptyResponse
- }
-
- result, err := client.BoolFromStackItem(res[0])
- if err != nil {
- return nil, err
- }
-
- return &NodeAllowedRes{
- result: result,
- }, nil
-}
diff --git a/pkg/morph/client/subnet/nodes.go b/pkg/morph/client/subnet/nodes.go
deleted file mode 100644
index 68725a0165..0000000000
--- a/pkg/morph/client/subnet/nodes.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package morphsubnet
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-
-// ManageNodesPrm groups parameters of node management in Subnet contract.
-//
-// Zero value adds node to subnet. Subnet and node IDs must be specified via setters.
-type ManageNodesPrm struct {
- // remove or add node
- rm bool
-
- args [2]any
-}
-
-// SetRemove marks node to be removed. By default, node is added.
-func (x *ManageNodesPrm) SetRemove() {
- x.rm = true
-}
-
-// SetSubnet sets identifier of the subnet in a binary NeoFS API protocol format.
-func (x *ManageNodesPrm) SetSubnet(id []byte) {
- x.args[0] = id
-}
-
-// SetNode sets node's public key in a binary format.
-func (x *ManageNodesPrm) SetNode(id []byte) {
- x.args[1] = id
-}
-
-// ManageNodesRes groups the resulting values of node management methods of Subnet contract.
-type ManageNodesRes struct{}
-
-// ManageNodes manages node list of the NeoFS subnet through Subnet contract calls.
-func (x Client) ManageNodes(prm ManageNodesPrm) (*ManageNodesRes, error) {
- var method string
-
- if prm.rm {
- method = removeNodeMethod
- } else {
- method = addNodeMethod
- }
-
- var prmInvoke client.InvokePrm
-
- prmInvoke.SetMethod(method)
- prmInvoke.SetArgs(prm.args[:]...)
-
- err := x.client.Invoke(prmInvoke)
- if err != nil {
- return nil, err
- }
-
- return new(ManageNodesRes), nil
-}
diff --git a/pkg/morph/client/subnet/put.go b/pkg/morph/client/subnet/put.go
deleted file mode 100644
index 2046e79c28..0000000000
--- a/pkg/morph/client/subnet/put.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package morphsubnet
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// PutPrm groups parameters of Put method of Subnet contract.
-type PutPrm struct {
- cliPrm client.InvokePrm
-
- args [3]any
-}
-
-// SetTxHash sets hash of the transaction which spawned the notification.
-// Ignore this parameter for new requests.
-func (x *PutPrm) SetTxHash(hash util.Uint256) {
- x.cliPrm.SetHash(hash)
-}
-
-// SetID sets identifier of the created subnet in a binary FrostFS API protocol format.
-func (x *PutPrm) SetID(id []byte) {
- x.args[0] = id
-}
-
-// SetOwner sets identifier of the subnet owner in a binary FrostFS API protocol format.
-func (x *PutPrm) SetOwner(id []byte) {
- x.args[1] = id
-}
-
-// SetInfo sets information about the created subnet in a binary FrostFS API protocol format.
-func (x *PutPrm) SetInfo(id []byte) {
- x.args[2] = id
-}
-
-// PutRes groups the resulting values of Put method of Subnet contract.
-type PutRes struct{}
-
-// Put creates subnet though the call of the corresponding method of the Subnet contract.
-func (x Client) Put(prm PutPrm) (*PutRes, error) {
- prm.cliPrm.SetMethod(putMethod)
- prm.cliPrm.SetArgs(prm.args[:]...)
-
- err := x.client.Invoke(prm.cliPrm)
- if err != nil {
- return nil, err
- }
-
- return new(PutRes), nil
-}
diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go
index ff8e507fec..f7b6705a8c 100644
--- a/pkg/morph/client/util.go
+++ b/pkg/morph/client/util.go
@@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
case stackitem.IntegerT:
n, err := param.TryInteger()
if err != nil {
- return nil, fmt.Errorf("can't parse integer bytes: %w", err)
+ return nil, fmt.Errorf("parse integer bytes: %w", err)
}
return n.Bytes(), nil
@@ -69,7 +69,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
// ArrayFromStackItem returns the slice of contract parameters from the passed parameter.
//
-// If passed parameter carries boolean false value, (nil, nil) returns.
+// If passed parameter carries boolean false value, returns (nil, nil).
func ArrayFromStackItem(param stackitem.Item) ([]stackitem.Item, error) {
switch param.Type() {
case stackitem.AnyT:
@@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) {
func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
return func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != HaltState {
- return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
+ return &notHaltStateError{state: r.State, exception: r.FaultException}
}
t.SystemFee += add
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
new file mode 100644
index 0000000000..87fcf84b88
--- /dev/null
+++ b/pkg/morph/client/waiter.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
+)
+
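+// waiterClient adapts Client to the polling-based waiter interface expected
+// by the neo-go waiter package.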
+type waiterClient struct {
+ c *Client
+}
+
+func (w *waiterClient) Context() context.Context {
+ return context.Background()
+}
+
+func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
+ return w.c.GetApplicationLog(hash, trig)
+}
+
+func (w *waiterClient) GetBlockCount() (uint32, error) {
+ return w.c.BlockCount()
+}
+
+func (w *waiterClient) GetVersion() (*result.Version, error) {
+ return w.c.GetVersion()
+}
+
+// WaitTxHalt waits until the transaction with the specified hash persists on the blockchain.
+// It also checks that the execution result finished in the HALT state.
+func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
+ w, err := waiter.NewPollingBased(&waiterClient{c: c})
+ if err != nil {
+ return fmt.Errorf("create tx waiter: %w", err)
+ }
+
+ res, err := w.WaitAny(ctx, vub, h)
+ if err != nil {
+ return fmt.Errorf("wait until tx persists: %w", err)
+ }
+
+ if res.VMState.HasFlag(vmstate.Halt) {
+ return nil
+ }
+ return &notHaltStateError{state: res.VMState.String(), exception: res.FaultException}
+}
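
`WaitTxHalt` pairs naturally with the new `InvokeRes`: the VUB returned by `Invoke` bounds how long the waiter needs to poll. A minimal sketch, assuming a hypothetical wrapper name:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

// invokeAndWait is a hypothetical wrapper: it submits the invocation and then
// blocks until the transaction persists, reusing the reported VUB as the
// polling deadline and failing on any non-HALT VM state.
func invokeAndWait(ctx context.Context, c *client.Client, sc *client.StaticClient, prm client.InvokePrm) error {
	res, err := sc.Invoke(ctx, prm)
	if err != nil {
		return err
	}
	return c.WaitTxHalt(ctx, res.VUB, res.Hash)
}
```
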
diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go
index 062a2a8864..99f80584aa 100644
--- a/pkg/morph/event/balance/lock.go
+++ b/pkg/morph/event/balance/lock.go
@@ -3,7 +3,7 @@ package balance
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -47,61 +47,17 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash }
// ParseLock from notification into lock structure.
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Lock
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var le balance.LockEvent
+ if err := le.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
}
- if ln := len(params); ln != 5 {
- return nil, event.WrongNumberOfParameters(5, ln)
- }
-
- // parse id
- ev.id, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get lock id: %w", err)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get lock user value: %w", err)
- }
-
- ev.user, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err)
- }
-
- // parse lock account
- lock, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get lock account value: %w", err)
- }
-
- ev.lock, err = util.Uint160DecodeBytesBE(lock)
- if err != nil {
- return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err)
- }
-
- // parse amount
- ev.amount, err = client.IntFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get lock amount: %w", err)
- }
-
- // parse until deadline
- ev.until, err = client.IntFromStackItem(params[4])
- if err != nil {
- return nil, fmt.Errorf("could not get lock deadline: %w", err)
- }
-
- ev.txHash = e.Container
-
- return ev, nil
+ return Lock{
+ id: le.TxID,
+ user: le.From,
+ lock: le.To,
+ amount: le.Amount.Int64(),
+ until: le.Until.Int64(),
+ txHash: e.Container,
+ }, nil
}
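
The pattern here (and in the container event parsers below) replaces hand-rolled stack-item walking with the typed `FromStackItem` decoders from frostfs-contract's rpcclient bindings. A minimal sketch of what such a decoder looks like, using a made-up two-field event rather than a real contract binding:

```go
package example

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// toyEvent is a hypothetical two-field notification used only to illustrate
// the FromStackItem decoding style the generated bindings follow.
type toyEvent struct {
	ID     []byte
	Amount *big.Int
}

// FromStackItem decodes the event from a stack-item array, validating the
// item count and the type of every field before touching the receiver.
func (e *toyEvent) FromStackItem(item stackitem.Item) error {
	arr, ok := item.Value().([]stackitem.Item)
	if !ok {
		return errors.New("not an array")
	}
	if len(arr) != 2 {
		return fmt.Errorf("wrong number of fields: %d", len(arr))
	}
	id, err := arr[0].TryBytes()
	if err != nil {
		return fmt.Errorf("field ID: %w", err)
	}
	amount, err := arr[1].TryInteger()
	if err != nil {
		return fmt.Errorf("field Amount: %w", err)
	}
	e.ID, e.Amount = id, amount
	return nil
}
```
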
diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go
index 9199bcd552..87b91aede4 100644
--- a/pkg/morph/event/balance/lock_test.go
+++ b/pkg/morph/event/balance/lock_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -28,7 +27,7 @@ func TestParseLock(t *testing.T) {
}
_, err := ParseLock(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go
index 398466f511..d28f6d5211 100644
--- a/pkg/morph/event/container/delete.go
+++ b/pkg/morph/event/container/delete.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -12,76 +12,38 @@ import (
// Delete structure of container.Delete notification from morph chain.
type Delete struct {
- containerID []byte
- signature []byte
- token []byte
+ ContainerIDValue []byte
+ SignatureValue []byte
+ TokenValue []byte
+ PublicKeyValue []byte
// For notary notifications only.
// Contains raw transactions of notary request.
- notaryRequest *payload.P2PNotaryRequest
+ NotaryRequestValue *payload.P2PNotaryRequest
}
// MorphEvent implements Neo:Morph Event interface.
func (Delete) MorphEvent() {}
// ContainerID is a marshalled container structure, defined in API.
-func (d Delete) ContainerID() []byte { return d.containerID }
+func (d Delete) ContainerID() []byte { return d.ContainerIDValue }
// Signature of marshalled container by container owner.
-func (d Delete) Signature() []byte { return d.signature }
+func (d Delete) Signature() []byte { return d.SignatureValue }
// SessionToken returns binary token of the session
// within which the container was removed.
func (d Delete) SessionToken() []byte {
- return d.token
+ return d.TokenValue
}
// NotaryRequest returns raw notary request if notification
// was received via notary service. Otherwise, returns nil.
func (d Delete) NotaryRequest() *payload.P2PNotaryRequest {
- return d.notaryRequest
+ return d.NotaryRequestValue
}
-const expectedItemNumDelete = 3
-
-// ParseDelete from notification into container event structure.
-//
-// Expects 3 stack items.
-func ParseDelete(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Delete
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != expectedItemNumDelete {
- return nil, event.WrongNumberOfParameters(expectedItemNumDelete, ln)
- }
-
- // parse container
- ev.containerID, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get container: %w", err)
- }
-
- // parse signature
- ev.signature, err = client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get signature: %w", err)
- }
-
- // parse session token
- ev.token, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get session token: %w", err)
- }
-
- return ev, nil
-}
+const expectedItemNumDelete = 4
// DeleteSuccess structures notification event of successful container removal
// thrown by Container contract.
@@ -96,28 +58,14 @@ func (DeleteSuccess) MorphEvent() {}
// ParseDeleteSuccess decodes notification event thrown by Container contract into
// DeleteSuccess and returns it as event.Event.
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
+ var dse container.DeleteSuccessEvent
+ if err := dse.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err)
}
- const expectedItemNumDeleteSuccess = 1
-
- if ln := len(items); ln != expectedItemNumDeleteSuccess {
- return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln)
- }
-
- binID, err := client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("parse container ID item: %w", err)
- }
-
- var res DeleteSuccess
-
- err = res.ID.Decode(binID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- return res, nil
+ var cnr cid.ID
+ cnr.SetSHA256(dse.ContainerID)
+ return DeleteSuccess{
+ ID: cnr,
+ }, nil
}
diff --git a/pkg/morph/event/container/delete_notary.go b/pkg/morph/event/container/delete_notary.go
index 371f18733a..9711636e7a 100644
--- a/pkg/morph/event/container/delete_notary.go
+++ b/pkg/morph/event/container/delete_notary.go
@@ -7,25 +7,30 @@ import (
func (d *Delete) setContainerID(v []byte) {
if v != nil {
- d.containerID = v
+ d.ContainerIDValue = v
}
}
func (d *Delete) setSignature(v []byte) {
if v != nil {
- d.signature = v
+ d.SignatureValue = v
}
}
+func (d *Delete) setPublicKey(v []byte) {
+ d.PublicKeyValue = v
+}
+
func (d *Delete) setToken(v []byte) {
if v != nil {
- d.token = v
+ d.TokenValue = v
}
}
var deleteFieldSetters = []func(*Delete, []byte){
// order on stack is reversed
(*Delete).setToken,
+ (*Delete).setPublicKey,
(*Delete).setSignature,
(*Delete).setContainerID,
}
@@ -62,7 +67,7 @@ func ParseDeleteNotary(ne event.NotaryEvent) (event.Event, error) {
}
}
- ev.notaryRequest = ne.Raw()
+ ev.NotaryRequestValue = ne.Raw()
return ev, nil
}
diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go
index 8bf8947913..62e7d7277c 100644
--- a/pkg/morph/event/container/delete_test.go
+++ b/pkg/morph/event/container/delete_test.go
@@ -4,72 +4,11 @@ import (
"crypto/sha256"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
-func TestParseDelete(t *testing.T) {
- var (
- containerID = []byte("containreID")
- signature = []byte("signature")
- token = []byte("token")
- )
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- }
-
- _, err := ParseDelete(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
- })
-
- t.Run("wrong container parameter", func(t *testing.T) {
- _, err := ParseDelete(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong signature parameter", func(t *testing.T) {
- _, err := ParseDelete(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(containerID),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong session token parameter", func(t *testing.T) {
- _, err := ParseDelete(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(containerID),
- stackitem.NewByteArray(signature),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParseDelete(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(containerID),
- stackitem.NewByteArray(signature),
- stackitem.NewByteArray(token),
- }))
-
- require.NoError(t, err)
-
- require.Equal(t, Delete{
- containerID: containerID,
- signature: signature,
- token: token,
- }, ev)
- })
-}
-
func TestParseDeleteSuccess(t *testing.T) {
t.Run("wrong number of parameters", func(t *testing.T) {
prms := []stackitem.Item{
@@ -78,7 +17,7 @@ func TestParseDeleteSuccess(t *testing.T) {
}
_, err := ParseDeleteSuccess(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong container parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go
deleted file mode 100644
index 8ef6a71af8..0000000000
--- a/pkg/morph/event/container/eacl.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package container
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/network/payload"
-)
-
-// SetEACL represents structure of notification about
-// modified eACL table coming from FrostFS Container contract.
-type SetEACL struct {
- table []byte
- signature []byte
- publicKey []byte
- token []byte
-
- // For notary notifications only.
- // Contains raw transactions of notary request.
- notaryRequest *payload.P2PNotaryRequest
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (SetEACL) MorphEvent() {}
-
-// Table returns returns eACL table in a binary FrostFS API format.
-func (x SetEACL) Table() []byte {
- return x.table
-}
-
-// Signature returns signature of the binary table.
-func (x SetEACL) Signature() []byte {
- return x.signature
-}
-
-// PublicKey returns public keys of container
-// owner in a binary format.
-func (x SetEACL) PublicKey() []byte {
- return x.publicKey
-}
-
-// SessionToken returns binary token of the session
-// within which the eACL was set.
-func (x SetEACL) SessionToken() []byte {
- return x.token
-}
-
-// NotaryRequest returns raw notary request if notification
-// was received via notary service. Otherwise, returns nil.
-func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest {
- return x.notaryRequest
-}
-
-const expectedItemNumEACL = 4
-
-// ParseSetEACL parses SetEACL notification event from list of stack items.
-//
-// Expects 4 stack items.
-func ParseSetEACL(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev SetEACL
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != expectedItemNumEACL {
- return nil, event.WrongNumberOfParameters(expectedItemNumEACL, ln)
- }
-
- // parse table
- ev.table, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not parse binary table: %w", err)
- }
-
- // parse signature
- ev.signature, err = client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not parse table signature: %w", err)
- }
-
- // parse public key
- ev.publicKey, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not parse binary public key: %w", err)
- }
-
- // parse session token
- ev.token, err = client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get session token: %w", err)
- }
-
- return ev, nil
-}
diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go
deleted file mode 100644
index 112065b42b..0000000000
--- a/pkg/morph/event/container/eacl_notary.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package container
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
-)
-
-func (x *SetEACL) setTable(v []byte) {
- if v != nil {
- x.table = v
- }
-}
-
-func (x *SetEACL) setSignature(v []byte) {
- if v != nil {
- x.signature = v
- }
-}
-
-func (x *SetEACL) setPublicKey(v []byte) {
- if v != nil {
- x.publicKey = v
- }
-}
-
-func (x *SetEACL) setToken(v []byte) {
- if v != nil {
- x.token = v
- }
-}
-
-var setEACLFieldSetters = []func(*SetEACL, []byte){
- // order on stack is reversed
- (*SetEACL).setToken,
- (*SetEACL).setPublicKey,
- (*SetEACL).setSignature,
- (*SetEACL).setTable,
-}
-
-const (
- // SetEACLNotaryEvent is method name for container EACL operations
- // in `Container` contract. Is used as identificator for notary
- // EACL changing requests.
- SetEACLNotaryEvent = "setEACL"
-)
-
-// ParseSetEACLNotary from NotaryEvent into container event structure.
-func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) {
- var (
- ev SetEACL
- currentOp opcode.Opcode
- )
-
- fieldNum := 0
-
- for _, op := range ne.Params() {
- currentOp = op.Code()
-
- switch {
- case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4:
- if fieldNum == expectedItemNumEACL {
- return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent)
- }
-
- setEACLFieldSetters[fieldNum](&ev, op.Param())
- fieldNum++
- default:
- return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code())
- }
- }
-
- ev.notaryRequest = ne.Raw()
-
- return ev, nil
-}
diff --git a/pkg/morph/event/container/eacl_test.go b/pkg/morph/event/container/eacl_test.go
deleted file mode 100644
index 2f0598597f..0000000000
--- a/pkg/morph/event/container/eacl_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package container
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParseEACL(t *testing.T) {
- var (
- binaryTable = []byte("table")
- signature = []byte("signature")
- publicKey = []byte("pubkey")
- token = []byte("token")
- )
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- items := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseSetEACL(createNotifyEventFromItems(items))
- require.EqualError(t, err, event.WrongNumberOfParameters(4, len(items)).Error())
- })
-
- t.Run("wrong container parameter", func(t *testing.T) {
- _, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong signature parameter", func(t *testing.T) {
- _, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binaryTable),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong key parameter", func(t *testing.T) {
- _, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binaryTable),
- stackitem.NewByteArray(signature),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong session token parameter", func(t *testing.T) {
- _, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binaryTable),
- stackitem.NewByteArray(signature),
- stackitem.NewByteArray(publicKey),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binaryTable),
- stackitem.NewByteArray(signature),
- stackitem.NewByteArray(publicKey),
- stackitem.NewByteArray(token),
- }))
- require.NoError(t, err)
-
- e := ev.(SetEACL)
-
- require.Equal(t, binaryTable, e.Table())
- require.Equal(t, signature, e.Signature())
- require.Equal(t, publicKey, e.PublicKey())
- require.Equal(t, token, e.SessionToken())
- })
-}
-
-func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
- return &state.ContainedNotificationEvent{
- NotificationEvent: state.NotificationEvent{
- Item: stackitem.NewArray(items),
- },
- }
-}
diff --git a/pkg/morph/event/container/estimates.go b/pkg/morph/event/container/estimates.go
deleted file mode 100644
index 9fd21e2b51..0000000000
--- a/pkg/morph/event/container/estimates.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package container
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-// StartEstimation structure of container.StartEstimation notification from
-// morph chain.
-type StartEstimation struct {
- epoch uint64
-}
-
-// StopEstimation structure of container.StopEstimation notification from
-// morph chain.
-type StopEstimation struct {
- epoch uint64
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (StartEstimation) MorphEvent() {}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (StopEstimation) MorphEvent() {}
-
-// Epoch returns epoch value for which to start container size estimation.
-func (s StartEstimation) Epoch() uint64 { return s.epoch }
-
-// Epoch returns epoch value for which to stop container size estimation.
-func (s StopEstimation) Epoch() uint64 { return s.epoch }
-
-// ParseStartEstimation from notification into container event structure.
-func ParseStartEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- epoch, err := parseEstimation(params)
- if err != nil {
- return nil, err
- }
-
- return StartEstimation{epoch: epoch}, nil
-}
-
-// ParseStopEstimation from notification into container event structure.
-func ParseStopEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- epoch, err := parseEstimation(params)
- if err != nil {
- return nil, err
- }
-
- return StopEstimation{epoch: epoch}, nil
-}
-
-func parseEstimation(params []stackitem.Item) (uint64, error) {
- if ln := len(params); ln != 1 {
- return 0, event.WrongNumberOfParameters(1, ln)
- }
-
- // parse container
- epoch, err := client.IntFromStackItem(params[0])
- if err != nil {
- return 0, fmt.Errorf("could not get estimation epoch: %w", err)
- }
-
- return uint64(epoch), nil
-}
diff --git a/pkg/morph/event/container/estimates_test.go b/pkg/morph/event/container/estimates_test.go
deleted file mode 100644
index be46e62c4e..0000000000
--- a/pkg/morph/event/container/estimates_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package container
-
-import (
- "math/big"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestStartEstimation(t *testing.T) {
- var epochNum uint64 = 100
- epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseStartEstimation(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong estimation parameter", func(t *testing.T) {
- _, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
- epochItem,
- }))
-
- require.NoError(t, err)
-
- require.Equal(t, StartEstimation{
- epochNum,
- }, ev)
- })
-}
-
-func TestStopEstimation(t *testing.T) {
- var epochNum uint64 = 100
- epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseStopEstimation(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong estimation parameter", func(t *testing.T) {
- _, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
- epochItem,
- }))
-
- require.NoError(t, err)
-
- require.Equal(t, StopEstimation{
- epochNum,
- }, ev)
- })
-}
diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go
index d163c68367..b09394ba4c 100644
--- a/pkg/morph/event/container/put.go
+++ b/pkg/morph/event/container/put.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -65,49 +65,6 @@ func (x PutNamed) Zone() string {
return x.zone
}
-// ParsePut from notification into container event structure.
-func ParsePut(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Put
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != expectedItemNumPut {
- return nil, event.WrongNumberOfParameters(expectedItemNumPut, ln)
- }
-
- // parse container
- ev.rawContainer, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get container: %w", err)
- }
-
- // parse signature
- ev.signature, err = client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get signature: %w", err)
- }
-
- // parse public key
- ev.publicKey, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get public key: %w", err)
- }
-
- // parse session token
- ev.token, err = client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get sesison token: %w", err)
- }
-
- return ev, nil
-}
-
// PutSuccess structures notification event of successful container creation
// thrown by Container contract.
type PutSuccess struct {
@@ -121,33 +78,14 @@ func (PutSuccess) MorphEvent() {}
// ParsePutSuccess decodes notification event thrown by Container contract into
// PutSuccess and returns it as event.Event.
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
+ var pse container.PutSuccessEvent
+ if err := pse.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err)
}
- const expectedItemNumPutSuccess = 2
-
- if ln := len(items); ln != expectedItemNumPutSuccess {
- return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln)
- }
-
- binID, err := client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("parse container ID item: %w", err)
- }
-
- _, err = client.BytesFromStackItem(items[1])
- if err != nil {
- return nil, fmt.Errorf("parse public key item: %w", err)
- }
-
- var res PutSuccess
-
- err = res.ID.Decode(binID)
- if err != nil {
- return nil, fmt.Errorf("decode container ID: %w", err)
- }
-
- return res, nil
+ var cnr cid.ID
+ cnr.SetSHA256(pse.ContainerID)
+ return PutSuccess{
+ ID: cnr,
+ }, nil
}
diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go
index f5779ced65..6b2ee7b0ac 100644
--- a/pkg/morph/event/container/put_notary.go
+++ b/pkg/morph/event/container/put_notary.go
@@ -46,7 +46,7 @@ const (
// put container requests.
PutNotaryEvent = "put"
- // PutNotaryEvent is an ID of notary "put named container" notification.
+ // PutNamedNotaryEvent is an ID of notary "put named container" notification.
PutNamedNotaryEvent = "putNamed"
)
diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go
index 2ccea296fd..dd5c7ea937 100644
--- a/pkg/morph/event/container/put_test.go
+++ b/pkg/morph/event/container/put_test.go
@@ -4,86 +4,12 @@ import (
"crypto/sha256"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
-func TestParsePut(t *testing.T) {
- var (
- containerData = []byte("containerData")
- signature = []byte("signature")
- publicKey = []byte("pubkey")
- token = []byte("token")
- )
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParsePut(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(expectedItemNumPut, len(prms)).Error())
- })
-
- t.Run("wrong container parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong signature parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(containerData),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong key parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(containerData),
- stackitem.NewByteArray(signature),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong session token parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(containerData),
- stackitem.NewByteArray(signature),
- stackitem.NewByteArray(publicKey),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(containerData),
- stackitem.NewByteArray(signature),
- stackitem.NewByteArray(publicKey),
- stackitem.NewByteArray(token),
- }))
- require.NoError(t, err)
-
- require.Equal(t, Put{
- rawContainer: containerData,
- signature: signature,
- publicKey: publicKey,
- token: token,
- }, ev)
- })
-}
-
func TestParsePutSuccess(t *testing.T) {
t.Run("wrong number of parameters", func(t *testing.T) {
prms := []stackitem.Item{
@@ -91,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) {
}
_, err := ParsePutSuccess(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong container ID parameter", func(t *testing.T) {
@@ -109,18 +35,30 @@ func TestParsePutSuccess(t *testing.T) {
id.Encode(binID)
t.Run("wrong public key parameter", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewMap(),
- }))
+ t.Run("wrong type", func(t *testing.T) {
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewMap(),
+ }))
- require.Error(t, err)
+ require.Error(t, err)
+ })
+ t.Run("garbage data", func(t *testing.T) {
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewByteArray([]byte("key")),
+ }))
+ require.Error(t, err)
+ })
})
t.Run("correct behavior", func(t *testing.T) {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(binID),
- stackitem.NewByteArray([]byte("key")),
+ stackitem.NewByteArray(pk.PublicKey().Bytes()),
}))
require.NoError(t, err)
diff --git a/pkg/morph/event/container/util_test.go b/pkg/morph/event/container/util_test.go
new file mode 100644
index 0000000000..159f6cd9f4
--- /dev/null
+++ b/pkg/morph/event/container/util_test.go
@@ -0,0 +1,14 @@
+package container
+
+import (
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
+ return &state.ContainedNotificationEvent{
+ NotificationEvent: state.NotificationEvent{
+ Item: stackitem.NewArray(items),
+ },
+ }
+}
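
With the per-file copies deleted above, `createNotifyEventFromItems` becomes the single shared test helper for this package. A sketch of a negative test built on it — the test name and cases are illustrative, not part of this diff:

```go
package container

import (
	"testing"

	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
	"github.com/stretchr/testify/require"
)

// TestParseMalformedNotifications is a hypothetical sketch: every parser left
// in this package should reject a notification whose payload is not the
// expected stack-item shape.
func TestParseMalformedNotifications(t *testing.T) {
	bad := createNotifyEventFromItems([]stackitem.Item{stackitem.NewMap()})

	_, err := ParsePutSuccess(bad)
	require.Error(t, err)

	_, err = ParseDeleteSuccess(bad)
	require.Error(t, err)
}
```
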
diff --git a/pkg/morph/event/frostfs/bind.go b/pkg/morph/event/frostfs/bind.go
deleted file mode 100644
index 49d10d3c39..0000000000
--- a/pkg/morph/event/frostfs/bind.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package frostfs
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type Bind struct {
- bindCommon
-}
-
-type bindCommon struct {
- user []byte
- keys [][]byte
-
- // txHash is used in notary environmental
- // for calculating unique but same for
- // all notification receivers values.
- txHash util.Uint256
-}
-
-// TxHash returns hash of the TX with new epoch
-// notification.
-func (b bindCommon) TxHash() util.Uint256 {
- return b.txHash
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (bindCommon) MorphEvent() {}
-
-func (b bindCommon) Keys() [][]byte { return b.keys }
-
-func (b bindCommon) User() []byte { return b.user }
-
-func ParseBind(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Bind
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- err = parseBind(&ev.bindCommon, params)
- if err != nil {
- return nil, err
- }
-
- ev.txHash = e.Container
-
- return ev, nil
-}
-
-func parseBind(dst *bindCommon, params []stackitem.Item) error {
- if ln := len(params); ln != 2 {
- return event.WrongNumberOfParameters(2, ln)
- }
-
- var err error
-
- // parse user
- dst.user, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return fmt.Errorf("could not get bind user: %w", err)
- }
-
- // parse keys
- bindKeys, err := client.ArrayFromStackItem(params[1])
- if err != nil {
- return fmt.Errorf("could not get bind keys: %w", err)
- }
-
- dst.keys = make([][]byte, 0, len(bindKeys))
-
- for i := range bindKeys {
- rawKey, err := client.BytesFromStackItem(bindKeys[i])
- if err != nil {
- return fmt.Errorf("could not get bind public key: %w", err)
- }
-
- dst.keys = append(dst.keys, rawKey)
- }
-
- return nil
-}
diff --git a/pkg/morph/event/frostfs/bind_test.go b/pkg/morph/event/frostfs/bind_test.go
deleted file mode 100644
index 38a82b8d4b..0000000000
--- a/pkg/morph/event/frostfs/bind_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package frostfs
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParseBind(t *testing.T) {
- var (
- user = []byte{0x1, 0x2, 0x3}
- publicKeys = [][]byte{
- []byte("key1"),
- []byte("key2"),
- []byte("key3"),
- }
- )
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- }
-
- _, err := ParseBind(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
- })
-
- t.Run("wrong first parameter", func(t *testing.T) {
- _, err := ParseBind(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong second parameter", func(t *testing.T) {
- _, err := ParseBind(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(user),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct", func(t *testing.T) {
- ev, err := ParseBind(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(user),
- stackitem.NewArray([]stackitem.Item{
- stackitem.NewByteArray(publicKeys[0]),
- stackitem.NewByteArray(publicKeys[1]),
- stackitem.NewByteArray(publicKeys[2]),
- }),
- }))
- require.NoError(t, err)
-
- e := ev.(Bind)
-
- require.Equal(t, user, e.User())
- require.Equal(t, publicKeys, e.Keys())
- })
-}
-
-func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
- return &state.ContainedNotificationEvent{
- NotificationEvent: state.NotificationEvent{
- Item: stackitem.NewArray(items),
- },
- }
-}
diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go
index 239ddb1a42..cf56464b80 100644
--- a/pkg/morph/event/frostfs/cheque.go
+++ b/pkg/morph/event/frostfs/cheque.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -11,76 +11,43 @@ import (
// Cheque structure of frostfs.Cheque notification from mainnet chain.
type Cheque struct {
- id []byte
- amount int64 // Fixed8
- user util.Uint160
- lock util.Uint160
+ IDValue []byte
+ AmountValue int64 // Fixed8
+ UserValue util.Uint160
+ LockValue util.Uint160
}
// MorphEvent implements Neo:Morph Event interface.
func (Cheque) MorphEvent() {}
// ID is a withdraw transaction hash.
-func (c Cheque) ID() []byte { return c.id }
+func (c Cheque) ID() []byte { return c.IDValue }
// User returns withdraw receiver script hash from main net.
-func (c Cheque) User() util.Uint160 { return c.user }
+func (c Cheque) User() util.Uint160 { return c.UserValue }
// Amount of the sent assets.
-func (c Cheque) Amount() int64 { return c.amount }
+func (c Cheque) Amount() int64 { return c.AmountValue }
// LockAccount return script hash for balance contract wallet.
-func (c Cheque) LockAccount() util.Uint160 { return c.lock }
+func (c Cheque) LockAccount() util.Uint160 { return c.LockValue }
// ParseCheque from notification into cheque structure.
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Cheque
- err error
- )
+ var ce frostfs.ChequeEvent
+ if err := ce.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err)
+ }
- params, err := event.ParseStackArray(e)
+ lock, err := util.Uint160DecodeBytesBE(ce.LockAccount)
if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err)
}
- if ln := len(params); ln != 4 {
- return nil, event.WrongNumberOfParameters(4, ln)
- }
-
- // parse id
- ev.id, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque id: %w", err)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque user: %w", err)
- }
-
- ev.user, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err)
- }
-
- // parse amount
- ev.amount, err = client.IntFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque amount: %w", err)
- }
-
- // parse lock account
- lock, err := client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get cheque lock account: %w", err)
- }
-
- ev.lock, err = util.Uint160DecodeBytesBE(lock)
- if err != nil {
- return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err)
- }
-
- return ev, nil
+ return Cheque{
+ IDValue: ce.Id,
+ AmountValue: ce.Amount.Int64(),
+ UserValue: ce.User,
+ LockValue: lock,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go
index 861f05a68d..d92b7922b3 100644
--- a/pkg/morph/event/frostfs/cheque_test.go
+++ b/pkg/morph/event/frostfs/cheque_test.go
@@ -4,7 +4,7 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -26,7 +26,7 @@ func TestParseCheque(t *testing.T) {
}
_, err := ParseCheque(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong id parameter", func(t *testing.T) {
@@ -77,10 +77,18 @@ func TestParseCheque(t *testing.T) {
require.NoError(t, err)
require.Equal(t, Cheque{
- id: id,
- amount: amount,
- user: user,
- lock: lock,
+ IDValue: id,
+ AmountValue: amount,
+ UserValue: user,
+ LockValue: lock,
}, ev)
})
}
+
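+// createNotifyEventFromItems packs the given stack items into a contained
+// notification event in the form the parsers under test expect.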
+func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
+ return &state.ContainedNotificationEvent{
+ NotificationEvent: state.NotificationEvent{
+ Item: stackitem.NewArray(items),
+ },
+ }
+}
diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go
index 1b9824b398..805e80f3c9 100644
--- a/pkg/morph/event/frostfs/config.go
+++ b/pkg/morph/event/frostfs/config.go
@@ -3,72 +3,48 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
)
type Config struct {
- key []byte
- value []byte
- id []byte
+ KeyValue []byte
+ ValueValue []byte
+ IDValue []byte
- // txHash is used in notary environmental
+ // TxHashValue is used in the notary environment
// for calculating unique but same for
// all notification receivers values.
- txHash util.Uint256
+ TxHashValue util.Uint256
}
// TxHash returns hash of the TX with new epoch
// notification.
func (u Config) TxHash() util.Uint256 {
- return u.txHash
+ return u.TxHashValue
}
// MorphEvent implements Neo:Morph Event interface.
func (Config) MorphEvent() {}
-func (u Config) ID() []byte { return u.id }
+func (u Config) ID() []byte { return u.IDValue }
-func (u Config) Key() []byte { return u.key }
+func (u Config) Key() []byte { return u.KeyValue }
-func (u Config) Value() []byte { return u.value }
+func (u Config) Value() []byte { return u.ValueValue }
func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Config
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var sce frostfs.SetConfigEvent
+ if err := sce.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err)
}
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse id
- ev.id, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get config update id: %w", err)
- }
-
- // parse key
- ev.key, err = client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get config key: %w", err)
- }
-
- // parse value
- ev.value, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get config value: %w", err)
- }
-
- ev.txHash = e.Container
-
- return ev, nil
+ return Config{
+ KeyValue: sce.Key,
+ ValueValue: sce.Value,
+ IDValue: sce.Id,
+ TxHashValue: e.Container,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go
index b56c8ecb2e..8acc8c15ce 100644
--- a/pkg/morph/event/frostfs/config_test.go
+++ b/pkg/morph/event/frostfs/config_test.go
@@ -3,7 +3,6 @@ package frostfs
import (
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -21,7 +20,7 @@ func TestParseConfig(t *testing.T) {
}
_, err := ParseConfig(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong first parameter", func(t *testing.T) {
@@ -60,9 +59,9 @@ func TestParseConfig(t *testing.T) {
require.NoError(t, err)
require.Equal(t, Config{
- id: id,
- key: key,
- value: value,
+ IDValue: id,
+ KeyValue: key,
+ ValueValue: value,
}, ev)
})
}
diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go
index b9467d112a..fcb01577ed 100644
--- a/pkg/morph/event/frostfs/deposit.go
+++ b/pkg/morph/event/frostfs/deposit.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -11,73 +11,38 @@ import (
// Deposit structure of frostfs.Deposit notification from mainnet chain.
type Deposit struct {
- id []byte
- amount int64 // Fixed8
- from util.Uint160
- to util.Uint160
+ IDValue []byte
+ AmountValue int64 // Fixed8
+ FromValue util.Uint160
+ ToValue util.Uint160
}
// MorphEvent implements Neo:Morph Event interface.
func (Deposit) MorphEvent() {}
// ID is a deposit transaction hash.
-func (d Deposit) ID() []byte { return d.id }
+func (d Deposit) ID() []byte { return d.IDValue }
// From is a script hash of asset sender in main net.
-func (d Deposit) From() util.Uint160 { return d.from }
+func (d Deposit) From() util.Uint160 { return d.FromValue }
// To is a script hash of asset receiver in balance contract.
-func (d Deposit) To() util.Uint160 { return d.to }
+func (d Deposit) To() util.Uint160 { return d.ToValue }
// Amount of transferred assets.
-func (d Deposit) Amount() int64 { return d.amount }
+func (d Deposit) Amount() int64 { return d.AmountValue }
// ParseDeposit notification into deposit structure.
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ev Deposit
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var de frostfs.DepositEvent
+ if err := de.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err)
}
- if ln := len(params); ln != 4 {
- return nil, event.WrongNumberOfParameters(4, ln)
- }
-
- // parse from
- from, err := client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit sender: %w", err)
- }
-
- ev.from, err = util.Uint160DecodeBytesBE(from)
- if err != nil {
- return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err)
- }
-
- // parse amount
- ev.amount, err = client.IntFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit amount: %w", err)
- }
-
- // parse to
- to, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit receiver: %w", err)
- }
-
- ev.to, err = util.Uint160DecodeBytesBE(to)
- if err != nil {
- return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err)
- }
-
- // parse id
- ev.id, err = client.BytesFromStackItem(params[3])
- if err != nil {
- return nil, fmt.Errorf("could not get deposit id: %w", err)
- }
-
- return ev, nil
+ return Deposit{
+ IDValue: de.TxHash[:],
+ AmountValue: de.Amount.Int64(),
+ FromValue: de.From,
+ ToValue: de.Receiver,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go
index 0f52e21191..38d3e61f6a 100644
--- a/pkg/morph/event/frostfs/deposit_test.go
+++ b/pkg/morph/event/frostfs/deposit_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -12,7 +11,7 @@ import (
func TestParseDeposit(t *testing.T) {
var (
- id = []byte("Hello World")
+ id = util.Uint256{0, 1, 2, 3}
from = util.Uint160{0x1, 0x2, 0x3}
to = util.Uint160{0x3, 0x2, 0x1}
@@ -26,7 +25,7 @@ func TestParseDeposit(t *testing.T) {
}
_, err := ParseDeposit(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong from parameter", func(t *testing.T) {
@@ -72,15 +71,15 @@ func TestParseDeposit(t *testing.T) {
stackitem.NewByteArray(from.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
stackitem.NewByteArray(to.BytesBE()),
- stackitem.NewByteArray(id),
+ stackitem.NewByteArray(id[:]),
}))
require.NoError(t, err)
require.Equal(t, Deposit{
- id: id,
- amount: amount,
- from: from,
- to: to,
+ IDValue: id[:],
+ AmountValue: amount,
+ FromValue: from,
+ ToValue: to,
}, ev)
})
}
diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go
deleted file mode 100644
index 62203540f0..0000000000
--- a/pkg/morph/event/frostfs/ir_update.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package frostfs
-
-import (
- "crypto/elliptic"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type UpdateInnerRing struct {
- keys []*keys.PublicKey
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (UpdateInnerRing) MorphEvent() {}
-
-func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys }
-
-func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) {
- var (
- ev UpdateInnerRing
- err error
- )
-
- if ln := len(params); ln != 1 {
- return nil, event.WrongNumberOfParameters(1, ln)
- }
-
- // parse keys
- irKeys, err := client.ArrayFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get updated inner ring keys: %w", err)
- }
-
- ev.keys = make([]*keys.PublicKey, 0, len(irKeys))
- for i := range irKeys {
- rawKey, err := client.BytesFromStackItem(irKeys[i])
- if err != nil {
- return nil, fmt.Errorf("could not get updated inner ring public key: %w", err)
- }
-
- key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
- if err != nil {
- return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err)
- }
-
- ev.keys = append(ev.keys, key)
- }
-
- return ev, nil
-}
diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go
deleted file mode 100644
index 8ce6fdc366..0000000000
--- a/pkg/morph/event/frostfs/ir_update_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package frostfs
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func genKey(t *testing.T) *keys.PrivateKey {
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
- return priv
-}
-
-func TestParseUpdateInnerRing(t *testing.T) {
- var (
- publicKeys = []*keys.PublicKey{
- genKey(t).PublicKey(),
- genKey(t).PublicKey(),
- genKey(t).PublicKey(),
- }
- )
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseUpdateInnerRing(prms)
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong first parameter", func(t *testing.T) {
- _, err := ParseUpdateInnerRing([]stackitem.Item{
- stackitem.NewMap(),
- })
-
- require.Error(t, err)
- })
-
- t.Run("correct", func(t *testing.T) {
- ev, err := ParseUpdateInnerRing([]stackitem.Item{
- stackitem.NewArray([]stackitem.Item{
- stackitem.NewByteArray(publicKeys[0].Bytes()),
- stackitem.NewByteArray(publicKeys[1].Bytes()),
- stackitem.NewByteArray(publicKeys[2].Bytes()),
- }),
- })
- require.NoError(t, err)
-
- require.Equal(t, UpdateInnerRing{
- keys: publicKeys,
- }, ev)
- })
-}
diff --git a/pkg/morph/event/frostfs/unbind.go b/pkg/morph/event/frostfs/unbind.go
deleted file mode 100644
index f88d679947..0000000000
--- a/pkg/morph/event/frostfs/unbind.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package frostfs
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
-)
-
-type Unbind struct {
- bindCommon
-}
-
-func ParseUnbind(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Unbind
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- err = parseBind(&ev.bindCommon, params)
- if err != nil {
- return nil, err
- }
-
- ev.txHash = e.Container
-
- return ev, nil
-}
diff --git a/pkg/morph/event/frostfs/unbind_test.go b/pkg/morph/event/frostfs/unbind_test.go
deleted file mode 100644
index 4b79d7c488..0000000000
--- a/pkg/morph/event/frostfs/unbind_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package frostfs
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParseUnbind(t *testing.T) {
- var (
- user = []byte{0x1, 0x2, 0x3}
- publicKeys = [][]byte{
- []byte("key1"),
- []byte("key2"),
- []byte("key3"),
- }
- )
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- }
-
- _, err := ParseUnbind(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
- })
-
- t.Run("wrong first parameter", func(t *testing.T) {
- _, err := ParseUnbind(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong second parameter", func(t *testing.T) {
- _, err := ParseUnbind(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(user),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct", func(t *testing.T) {
- ev, err := ParseUnbind(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(user),
- stackitem.NewArray([]stackitem.Item{
- stackitem.NewByteArray(publicKeys[0]),
- stackitem.NewByteArray(publicKeys[1]),
- stackitem.NewByteArray(publicKeys[2]),
- }),
- }))
- require.NoError(t, err)
-
- e := ev.(Unbind)
-
- require.Equal(t, user, e.User())
- require.Equal(t, publicKeys, e.Keys())
- })
-}
diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go
index 3bbf76c2cb..2568b65127 100644
--- a/pkg/morph/event/frostfs/withdraw.go
+++ b/pkg/morph/event/frostfs/withdraw.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -11,58 +11,33 @@ import (
// Withdraw structure of frostfs.Withdraw notification from mainnet chain.
type Withdraw struct {
- id []byte
- amount int64 // Fixed8
- user util.Uint160
+ IDValue []byte
+ AmountValue int64 // Fixed8
+ UserValue util.Uint160
}
// MorphEvent implements Neo:Morph Event interface.
func (Withdraw) MorphEvent() {}
// ID is a withdraw transaction hash.
-func (w Withdraw) ID() []byte { return w.id }
+func (w Withdraw) ID() []byte { return w.IDValue }
// User returns withdraw receiver script hash from main net.
-func (w Withdraw) User() util.Uint160 { return w.user }
+func (w Withdraw) User() util.Uint160 { return w.UserValue }
// Amount of the withdraw assets.
-func (w Withdraw) Amount() int64 { return w.amount }
+func (w Withdraw) Amount() int64 { return w.AmountValue }
// ParseWithdraw notification into withdraw structure.
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ev Withdraw
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ var we frostfs.WithdrawEvent
+ if err := we.FromStackItem(e.Item); err != nil {
+ return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err)
}
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse user
- user, err := client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw user: %w", err)
- }
-
- ev.user, err = util.Uint160DecodeBytesBE(user)
- if err != nil {
- return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err)
- }
-
- // parse amount
- ev.amount, err = client.IntFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw amount: %w", err)
- }
-
- // parse id
- ev.id, err = client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get withdraw id: %w", err)
- }
-
- return ev, nil
+ return Withdraw{
+ IDValue: we.TxHash[:],
+ AmountValue: we.Amount.Int64(),
+ UserValue: we.User,
+ }, nil
}
diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go
index 5544283ef6..e382305e61 100644
--- a/pkg/morph/event/frostfs/withdraw_test.go
+++ b/pkg/morph/event/frostfs/withdraw_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -12,7 +11,7 @@ import (
func TestParseWithdraw(t *testing.T) {
var (
- id = []byte("Hello World")
+ id = util.Uint256{1, 2, 3}
user = util.Uint160{0x1, 0x2, 0x3}
amount int64 = 10
@@ -25,7 +24,7 @@ func TestParseWithdraw(t *testing.T) {
}
_, err := ParseWithdraw(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong user parameter", func(t *testing.T) {
@@ -59,14 +58,14 @@ func TestParseWithdraw(t *testing.T) {
ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(user.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
- stackitem.NewByteArray(id),
+ stackitem.NewByteArray(id[:]),
}))
require.NoError(t, err)
require.Equal(t, Withdraw{
- id: id,
- amount: amount,
- user: user,
+ IDValue: id[:],
+ AmountValue: amount,
+ UserValue: user,
}, ev)
})
}
diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go
index 182b4667e9..55a514ff16 100644
--- a/pkg/morph/event/handlers.go
+++ b/pkg/morph/event/handlers.go
@@ -1,32 +1,26 @@
package event
import (
+ "context"
+
"github.com/nspcc-dev/neo-go/pkg/core/block"
+ "github.com/nspcc-dev/neo-go/pkg/util"
)
// Handler is an Event processing function.
-type Handler func(Event)
+type Handler func(context.Context, Event)
// BlockHandler is a chain block processing function.
-type BlockHandler func(*block.Block)
+type BlockHandler func(context.Context, *block.Block)
// NotificationHandlerInfo is a structure that groups
// the parameters of the handler of particular
// contract event.
type NotificationHandlerInfo struct {
- scriptHashWithType
-
- h Handler
-}
-
-// SetHandler is an event handler setter.
-func (s *NotificationHandlerInfo) SetHandler(v Handler) {
- s.h = v
-}
-
-// Handler returns an event handler.
-func (s NotificationHandlerInfo) Handler() Handler {
- return s.h
+ Contract util.Uint160
+ Type Type
+ Parser NotificationParser
+ Handlers []Handler
}
// NotaryHandlerInfo is a structure that groups
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index 13ad868e45..e5cdfeef76 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -6,6 +6,7 @@ import (
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -32,13 +33,6 @@ type Listener interface {
// it could not be started.
ListenWithError(context.Context, chan<- error)
- // SetNotificationParser must set the parser of particular contract event.
- //
- // Parser of each event must be set once. All parsers must be set before Listen call.
- //
- // Must ignore nil parsers and all calls after listener has been started.
- SetNotificationParser(NotificationParserInfo)
-
// RegisterNotificationHandler must register the event handler for particular notification event of contract.
//
// The specified handler must be called after each capture and parsing of the event.
@@ -95,9 +89,9 @@ type ListenerParams struct {
type listener struct {
mtx sync.RWMutex
- startOnce, stopOnce sync.Once
+ wg sync.WaitGroup
- started bool
+ startOnce, stopOnce sync.Once
notificationParsers map[scriptHashWithType]NotificationParser
notificationHandlers map[scriptHashWithType][]Handler
@@ -117,12 +111,18 @@ type listener struct {
pool *ants.Pool
}
-const newListenerFailMsg = "could not instantiate Listener"
+const newListenerFailMsg = "instantiate Listener"
var (
errNilLogger = errors.New("nil logger")
errNilSubscriber = errors.New("nil event subscriber")
+
+ errNotificationSubscrConnectionTerminated = errors.New("event subscriber connection has been terminated")
+
+ errNotarySubscrConnectionTerminated = errors.New("notary event subscriber connection has been terminated")
+
+ errBlockNotificationChannelClosed = errors.New("new block notification channel is closed")
)
// Listen starts the listening for events with registered handlers.
@@ -132,11 +132,10 @@ var (
// Returns an error if listener was already started.
func (l *listener) Listen(ctx context.Context) {
l.startOnce.Do(func() {
- if err := l.listen(ctx, nil); err != nil {
- l.log.Error("could not start listen to events",
- zap.String("error", err.Error()),
- )
- }
+ l.wg.Add(1)
+ defer l.wg.Done()
+
+ l.listen(ctx, nil)
})
}
@@ -148,23 +147,31 @@ func (l *listener) Listen(ctx context.Context) {
// Returns an error if listener was already started.
func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
l.startOnce.Do(func() {
- if err := l.listen(ctx, intError); err != nil {
- l.log.Error("could not start listen to events",
- zap.String("error", err.Error()),
- )
- intError <- err
- }
+ l.wg.Add(1)
+ defer l.wg.Done()
+
+ l.listen(ctx, intError)
})
}
-func (l *listener) listen(ctx context.Context, intError chan<- error) error {
+func (l *listener) listen(ctx context.Context, intError chan<- error) {
+ subErrCh := make(chan error)
+
+ go l.subscribe(subErrCh)
+
+ l.listenLoop(ctx, intError, subErrCh)
+}
+
+func (l *listener) subscribe(errCh chan error) {
+ l.wg.Add(1)
+ defer l.wg.Done()
// create the list of listening contract hashes
hashes := make([]util.Uint160, 0)
// fill the list with the contracts with set event parsers.
l.mtx.RLock()
for hashType := range l.notificationParsers {
- scHash := hashType.ScriptHash()
+ scHash := hashType.Hash
// prevent repetitions
for _, hash := range hashes {
@@ -173,135 +180,131 @@ func (l *listener) listen(ctx context.Context, intError chan<- error) error {
}
}
- hashes = append(hashes, hashType.ScriptHash())
+ hashes = append(hashes, hashType.Hash)
}
-
- // mark listener as started
- l.started = true
-
l.mtx.RUnlock()
- chEvent, err := l.subscriber.SubscribeForNotification(hashes...)
+ err := l.subscriber.SubscribeForNotification(hashes...)
if err != nil {
- return err
+ errCh <- fmt.Errorf("subscribe for notifications: %w", err)
+ return
}
- l.listenLoop(ctx, chEvent, intError)
-
- return nil
-}
-
-func (l *listener) listenLoop(ctx context.Context, chEvent <-chan *state.ContainedNotificationEvent, intErr chan<- error) {
- var (
- blockChan <-chan *block.Block
-
- notaryChan <-chan *result.NotaryRequestEvent
-
- err error
- )
-
if len(l.blockHandlers) > 0 {
- if blockChan, err = l.subscriber.BlockNotifications(); err != nil {
- if intErr != nil {
- intErr <- fmt.Errorf("could not open block notifications channel: %w", err)
- } else {
- l.log.Debug("could not open block notifications channel",
- zap.String("error", err.Error()),
- )
- }
-
+ if err = l.subscriber.BlockNotifications(); err != nil {
+ errCh <- fmt.Errorf("subscribe for blocks: %w", err)
return
}
- } else {
- blockChan = make(chan *block.Block)
}
if l.listenNotary {
- if notaryChan, err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
- if intErr != nil {
- intErr <- fmt.Errorf("could not open notary notifications channel: %w", err)
- } else {
- l.log.Debug("could not open notary notifications channel",
- zap.String("error", err.Error()),
- )
- }
-
+ if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
+ errCh <- fmt.Errorf("subscribe for notary requests: %w", err)
return
}
}
+}
+
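+// sendError forwards err to the external error channel, if one was provided.
+// It returns true when the error has been delivered and false when no channel
+// was set or the context was canceled first.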
+func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error) bool {
+ if intErr == nil {
+ return false
+ }
+ // This select is required because we are reading from the error channel and
+ // closing the listener in the same goroutine when shutting down the node.
+ select {
+ case <-ctx.Done():
+ l.log.Info(ctx, logs.EventStopEventListenerByContext,
+ zap.String("reason", ctx.Err().Error()),
+ )
+ return false
+ case intErr <- err:
+ return true
+ }
+}
+
+func (l *listener) listenLoop(ctx context.Context, intErr chan<- error, subErrCh chan error) {
+ chs := l.subscriber.NotificationChannels()
loop:
for {
select {
+ case err := <-subErrCh:
+ if !l.sendError(ctx, intErr, err) {
+ l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
+ }
+ break loop
case <-ctx.Done():
- l.log.Info("stop event listener by context",
+ l.log.Info(ctx, logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
break loop
- case notifyEvent, ok := <-chEvent:
+ case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
- l.log.Warn("stop event listener by notification channel")
- if intErr != nil {
- intErr <- errors.New("event subscriber connection has been terminated")
- }
-
+ l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
+ l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
break loop
} else if notifyEvent == nil {
- l.log.Warn("nil notification event was caught")
+ l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
continue loop
}
- if err = l.pool.Submit(func() {
- l.parseAndHandleNotification(notifyEvent)
- }); err != nil {
- l.log.Warn("listener worker pool drained",
- zap.Int("capacity", l.pool.Cap()))
- }
- case notaryEvent, ok := <-notaryChan:
+ l.handleNotifyEvent(ctx, notifyEvent)
+ case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
- l.log.Warn("stop event listener by notary channel")
- if intErr != nil {
- intErr <- errors.New("notary event subscriber connection has been terminated")
- }
-
+ l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
+ l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
break loop
} else if notaryEvent == nil {
- l.log.Warn("nil notary event was caught")
+ l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
continue loop
}
- if err = l.pool.Submit(func() {
- l.parseAndHandleNotary(notaryEvent)
- }); err != nil {
- l.log.Warn("listener worker pool drained",
- zap.Int("capacity", l.pool.Cap()))
- }
- case b, ok := <-blockChan:
+ l.handleNotaryEvent(ctx, notaryEvent)
+ case b, ok := <-chs.BlockCh:
if !ok {
- l.log.Warn("stop event listener by block channel")
- if intErr != nil {
- intErr <- errors.New("new block notification channel is closed")
- }
-
+ l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
+ l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
break loop
} else if b == nil {
- l.log.Warn("nil block was caught")
+ l.log.Warn(ctx, logs.EventNilBlockWasCaught)
continue loop
}
- if err = l.pool.Submit(func() {
- for i := range l.blockHandlers {
- l.blockHandlers[i](b)
- }
- }); err != nil {
- l.log.Warn("listener worker pool drained",
- zap.Int("capacity", l.pool.Cap()))
- }
+ l.handleBlockEvent(ctx, b)
}
}
}
-func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) {
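+// handleBlockEvent runs all registered block handlers for b on the worker pool.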
+func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) {
+ if err := l.pool.Submit(func() {
+ for i := range l.blockHandlers {
+ l.blockHandlers[i](ctx, b)
+ }
+ }); err != nil {
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ zap.Int("capacity", l.pool.Cap()))
+ }
+}
+
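+// handleNotaryEvent submits parsing and handling of the notary request to the worker pool.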
+func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) {
+ if err := l.pool.Submit(func() {
+ l.parseAndHandleNotary(ctx, notaryEvent)
+ }); err != nil {
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ zap.Int("capacity", l.pool.Cap()))
+ }
+}
+
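+// handleNotifyEvent submits parsing and handling of the notification to the worker pool.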
+func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
+ if err := l.pool.Submit(func() {
+ l.parseAndHandleNotification(ctx, notifyEvent)
+ }); err != nil {
+ l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ zap.Int("capacity", l.pool.Cap()))
+ }
+}
+
+func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
log := l.log.With(
zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
)
@@ -314,16 +317,14 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
)
// get the event parser
- keyEvent := scriptHashWithType{}
- keyEvent.SetScriptHash(notifyEvent.ScriptHash)
- keyEvent.SetType(typEvent)
+ keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent}
l.mtx.RLock()
parser, ok := l.notificationParsers[keyEvent]
l.mtx.RUnlock()
if !ok {
- log.Debug("event parser not set")
+ log.Debug(ctx, logs.EventEventParserNotSet)
return
}
@@ -331,8 +332,8 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
// parse the notification event
event, err := parser(notifyEvent)
if err != nil {
- log.Warn("could not parse notification event",
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
+ zap.Error(err),
)
return
@@ -344,7 +345,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
l.mtx.RUnlock()
if len(handlers) == 0 {
- log.Info("notification handlers for parsed notification event were not registered",
+ log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -352,23 +353,26 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
}
for _, handler := range handlers {
- handler(event)
+ handler(ctx, event)
}
}
-func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
+func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) {
// prepare the notary event
notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest)
if err != nil {
+ var expErr *ExpiredTXError
switch {
case errors.Is(err, ErrTXAlreadyHandled):
- case errors.Is(err, ErrMainTXExpired):
- l.log.Warn("skip expired main TX notary event",
- zap.String("error", err.Error()),
+ case errors.As(err, &expErr):
+ l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
+ zap.Error(err),
+ zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
+ zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
)
default:
- l.log.Warn("could not prepare and validate notary event",
- zap.String("error", err.Error()),
+ l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
+ zap.Error(err),
)
}
@@ -391,7 +395,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Debug("notary parser not set")
+ log.Debug(ctx, logs.EventNotaryParserNotSet)
return
}
@@ -399,8 +403,8 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
// parse the notary event
event, err := parser(notaryEvent)
if err != nil {
- log.Warn("could not parse notary event",
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
+ zap.Error(err),
)
return
@@ -412,47 +416,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Info("notary handlers for parsed notification event were not registered",
+ log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
return
}
- handler(event)
-}
-
-// SetNotificationParser sets the parser of particular contract event.
-//
-// Ignores nil and already set parsers.
-// Ignores the parser if listener is started.
-func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
- log := l.log.With(
- zap.String("contract", pi.ScriptHash().StringLE()),
- zap.Stringer("event_type", pi.getType()),
- )
-
- parser := pi.parser()
- if parser == nil {
- log.Info("ignore nil event parser")
- return
- }
-
- l.mtx.Lock()
- defer l.mtx.Unlock()
-
- // check if the listener was started
- if l.started {
- log.Warn("listener has been already started, ignore parser")
- return
- }
-
- // add event parser
- if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok {
- l.notificationParsers[pi.scriptHashWithType] = pi.parser()
- }
-
- log.Debug("registered new event parser")
+ handler(ctx, event)
}
// RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -461,35 +432,23 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
// Ignores handlers of event without parser.
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
log := l.log.With(
- zap.String("contract", hi.ScriptHash().StringLE()),
- zap.Stringer("event_type", hi.GetType()),
+ zap.String("contract", hi.Contract.StringLE()),
+ zap.Stringer("event_type", hi.Type),
)
- handler := hi.Handler()
- if handler == nil {
- log.Warn("ignore nil event handler")
- return
- }
-
// check if parser was set
- l.mtx.RLock()
- _, ok := l.notificationParsers[hi.scriptHashWithType]
- l.mtx.RUnlock()
-
- if !ok {
- log.Warn("ignore handler of event w/o parser")
- return
- }
-
- // add event handler
l.mtx.Lock()
- l.notificationHandlers[hi.scriptHashWithType] = append(
- l.notificationHandlers[hi.scriptHashWithType],
- hi.Handler(),
- )
- l.mtx.Unlock()
+ defer l.mtx.Unlock()
- log.Debug("registered new event handler")
+ k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type}
+
+ l.notificationParsers[k] = hi.Parser
+ l.notificationHandlers[k] = append(
+ l.notificationHandlers[k],
+ hi.Handlers...,
+ )
+
+ log.Debug(context.Background(), logs.EventRegisteredNewEventHandler)
}
// EnableNotarySupport enables notary request listening. Passed hash is
@@ -528,27 +487,15 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
zap.Stringer("notary_type", pi.RequestType()),
)
- parser := pi.parser()
- if parser == nil {
- log.Info("ignore nil notary event parser")
- return
- }
-
l.mtx.Lock()
defer l.mtx.Unlock()
- // check if the listener was started
- if l.started {
- log.Warn("listener has been already started, ignore notary parser")
- return
- }
-
// add event parser
if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok {
l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
}
- log.Info("registered new event parser")
+ log.Info(context.Background(), logs.EventRegisteredNewEventParser)
}
// RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -566,19 +513,13 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
zap.Stringer("notary type", hi.RequestType()),
)
- handler := hi.Handler()
- if handler == nil {
- log.Warn("ignore nil notary event handler")
- return
- }
-
// check if parser was set
l.mtx.RLock()
_, ok := l.notaryParsers[hi.notaryRequestTypes]
l.mtx.RUnlock()
if !ok {
- log.Warn("ignore handler of notary event w/o parser")
+ log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser)
return
}
@@ -587,32 +528,24 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
l.mtx.Unlock()
- log.Info("registered new event handler")
+ log.Info(context.Background(), logs.EventRegisteredNewEventHandler)
}
// Stop closes subscription channel with remote neo node.
func (l *listener) Stop() {
l.stopOnce.Do(func() {
l.subscriber.Close()
+ l.pool.Release()
})
+ l.wg.Wait()
}
func (l *listener) RegisterBlockHandler(handler BlockHandler) {
- if handler == nil {
- l.log.Warn("ignore nil block handler")
- return
- }
-
l.blockHandlers = append(l.blockHandlers, handler)
}
// NewListener create the notification event listener instance and returns Listener interface.
func NewListener(p ListenerParams) (Listener, error) {
- // defaultPoolCap is a default worker
- // pool capacity if it was not specified
- // via params
- const defaultPoolCap = 10
-
switch {
case p.Logger == nil:
return nil, fmt.Errorf("%s: %w", newListenerFailMsg, errNilLogger)
@@ -620,14 +553,11 @@ func NewListener(p ListenerParams) (Listener, error) {
return nil, fmt.Errorf("%s: %w", newListenerFailMsg, errNilSubscriber)
}
- poolCap := p.WorkerPoolCapacity
- if poolCap == 0 {
- poolCap = defaultPoolCap
- }
-
- pool, err := ants.NewPool(poolCap, ants.WithNonblocking(true))
+ // The pool here must be blocking, otherwise notifications could be dropped.
+ // The default capacity is 0, which means "infinite".
+ pool, err := ants.NewPool(p.WorkerPoolCapacity)
if err != nil {
- return nil, fmt.Errorf("could not init worker pool: %w", err)
+ return nil, fmt.Errorf("init worker pool: %w", err)
}
return &listener{
diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go
new file mode 100644
index 0000000000..87f37305ff
--- /dev/null
+++ b/pkg/morph/event/listener_test.go
@@ -0,0 +1,177 @@
+package event
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "github.com/nspcc-dev/neo-go/pkg/core/block"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEventHandling(t *testing.T) {
+ blockCh := make(chan *block.Block)
+ notificationCh := make(chan *state.ContainedNotificationEvent)
+ notaryRequestsCh := make(chan *result.NotaryRequestEvent)
+
+ l, err := NewListener(ListenerParams{
+ Logger: test.NewLogger(t),
+ Subscriber: &testSubscriber{
+ blockCh: blockCh,
+ notificationCh: notificationCh,
+ notaryRequestsCh: notaryRequestsCh,
+ },
+ WorkerPoolCapacity: 10,
+ })
+ require.NoError(t, err, "failed to create listener")
+
+ list := l.(*listener)
+
+ blockHandled := make(chan bool)
+ handledBlocks := make([]*block.Block, 0)
+ l.RegisterBlockHandler(func(_ context.Context, b *block.Block) {
+ handledBlocks = append(handledBlocks, b)
+ blockHandled <- true
+ })
+
+ notificationHandled := make(chan bool)
+ handledNotifications := make([]Event, 0)
+ l.RegisterNotificationHandler(NotificationHandlerInfo{
+ Contract: util.Uint160{100},
+ Type: TypeFromString("notification type"),
+ Parser: func(cne *state.ContainedNotificationEvent) (Event, error) {
+ return testNotificationEvent{source: cne}, nil
+ },
+ Handlers: []Handler{
+ func(_ context.Context, e Event) {
+ handledNotifications = append(handledNotifications, e)
+ notificationHandled <- true
+ },
+ },
+ })
+
+ go list.Listen(context.Background())
+
+ t.Run("handles block events", func(t *testing.T) {
+ b := &block.Block{}
+
+ blockCh <- b
+
+ <-blockHandled
+
+ require.Equal(t, 1, len(handledBlocks), "invalid handled blocks length")
+ require.Equal(t, b, handledBlocks[0], "invalid handled block")
+ })
+
+ t.Run("handles notifications", func(t *testing.T) {
+ notification := &state.ContainedNotificationEvent{
+ Container: util.Uint256{49},
+ NotificationEvent: state.NotificationEvent{
+ ScriptHash: util.Uint160{100},
+ Name: "notification type",
+ },
+ }
+
+ notificationCh <- notification
+
+ <-notificationHandled
+ require.EqualValues(t, []Event{testNotificationEvent{source: notification}}, handledNotifications, "invalid handled notifications")
+ })
+}
+
+func TestErrorPassing(t *testing.T) {
+ blockCh := make(chan *block.Block)
+ notificationCh := make(chan *state.ContainedNotificationEvent)
+ notaryRequestsCh := make(chan *result.NotaryRequestEvent)
+
+ t.Run("notification error", func(t *testing.T) {
+ nErr := fmt.Errorf("notification error")
+ l, err := NewListener(ListenerParams{
+ Logger: test.NewLogger(t),
+ Subscriber: &testSubscriber{
+ blockCh: blockCh,
+ notificationCh: notificationCh,
+ notaryRequestsCh: notaryRequestsCh,
+
+ notificationErr: nErr,
+ },
+ WorkerPoolCapacity: 10,
+ })
+ require.NoError(t, err, "failed to create listener")
+
+ errCh := make(chan error)
+
+ go l.ListenWithError(context.Background(), errCh)
+
+ err = <-errCh
+
+ require.ErrorIs(t, err, nErr, "invalid notification error")
+ })
+
+ t.Run("block error", func(t *testing.T) {
+ bErr := fmt.Errorf("block error")
+ l, err := NewListener(ListenerParams{
+ Logger: test.NewLogger(t),
+ Subscriber: &testSubscriber{
+ blockCh: blockCh,
+ notificationCh: notificationCh,
+ notaryRequestsCh: notaryRequestsCh,
+
+ blockErr: bErr,
+ },
+ WorkerPoolCapacity: 10,
+ })
+ require.NoError(t, err, "failed to create listener")
+ l.RegisterBlockHandler(func(context.Context, *block.Block) {})
+
+ errCh := make(chan error)
+
+ go l.ListenWithError(context.Background(), errCh)
+
+ err = <-errCh
+
+ require.ErrorIs(t, err, bErr, "invalid block error")
+ })
+}
+
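+// testSubscriber is a test double for the listener's event subscriber: it
+// exposes the given channels directly and returns the configured
+// subscription errors.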
+type testSubscriber struct {
+ blockCh chan *block.Block
+ notificationCh chan *state.ContainedNotificationEvent
+ notaryRequestsCh chan *result.NotaryRequestEvent
+
+ blockErr error
+ notificationErr error
+}
+
+func (s *testSubscriber) SubscribeForNotification(...util.Uint160) error {
+ return s.notificationErr
+}
+func (s *testSubscriber) UnsubscribeForNotification() {}
+func (s *testSubscriber) BlockNotifications() error {
+ return s.blockErr
+}
+
+func (s *testSubscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error {
+ return nil
+}
+
+func (s *testSubscriber) NotificationChannels() subscriber.NotificationChannels {
+ return subscriber.NotificationChannels{
+ BlockCh: s.blockCh,
+ NotificationsCh: s.notificationCh,
+ NotaryRequestsCh: s.notaryRequestsCh,
+ }
+}
+
+func (s *testSubscriber) Close() {}
+
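+// testNotificationEvent is a minimal Event implementation that remembers the
+// notification it was parsed from.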
+type testNotificationEvent struct {
+ source *state.ContainedNotificationEvent
+}
+
+func (e testNotificationEvent) MorphEvent() {}
diff --git a/pkg/morph/event/netmap/add_peer.go b/pkg/morph/event/netmap/add_peer.go
index 87cf94082b..80c5559fc4 100644
--- a/pkg/morph/event/netmap/add_peer.go
+++ b/pkg/morph/event/netmap/add_peer.go
@@ -1,56 +1,28 @@
package netmap
import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/network/payload"
)
type AddPeer struct {
- node []byte
+ NodeBytes []byte
// For notary notifications only.
// Contains raw transactions of notary request.
- notaryRequest *payload.P2PNotaryRequest
+ Request *payload.P2PNotaryRequest
}
// MorphEvent implements Neo:Morph Event interface.
func (AddPeer) MorphEvent() {}
func (s AddPeer) Node() []byte {
- return s.node
+ return s.NodeBytes
}
// NotaryRequest returns raw notary request if notification
// was received via notary service. Otherwise, returns nil.
func (s AddPeer) NotaryRequest() *payload.P2PNotaryRequest {
- return s.notaryRequest
+ return s.Request
}
const expectedItemNumAddPeer = 1
-
-func ParseAddPeer(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev AddPeer
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != expectedItemNumAddPeer {
- return nil, event.WrongNumberOfParameters(expectedItemNumAddPeer, ln)
- }
-
- ev.node, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get raw nodeinfo: %w", err)
- }
-
- return ev, nil
-}
diff --git a/pkg/morph/event/netmap/add_peer_notary.go b/pkg/morph/event/netmap/add_peer_notary.go
index a506b052d4..a24722a972 100644
--- a/pkg/morph/event/netmap/add_peer_notary.go
+++ b/pkg/morph/event/netmap/add_peer_notary.go
@@ -7,7 +7,7 @@ import (
func (s *AddPeer) setNode(v []byte) {
if v != nil {
- s.node = v
+ s.NodeBytes = v
}
}
@@ -43,7 +43,7 @@ func ParseAddPeerNotary(ne event.NotaryEvent) (event.Event, error) {
}
}
- ev.notaryRequest = ne.Raw()
+ ev.Request = ne.Raw()
return ev, nil
}
diff --git a/pkg/morph/event/netmap/add_peer_test.go b/pkg/morph/event/netmap/add_peer_test.go
index 1b8bcf40a6..4118bb8c8a 100644
--- a/pkg/morph/event/netmap/add_peer_test.go
+++ b/pkg/morph/event/netmap/add_peer_test.go
@@ -1,47 +1,10 @@
package netmap
import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
)
-func TestParseAddPeer(t *testing.T) {
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParseAddPeer(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
- })
-
- t.Run("wrong first parameter type", func(t *testing.T) {
- _, err := ParseAddPeer(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- info := []byte{1, 2, 3}
-
- ev, err := ParseAddPeer(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(info),
- }))
-
- require.NoError(t, err)
- require.Equal(t, AddPeer{
- node: info,
- }, ev)
- })
-}
-
func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
return &state.ContainedNotificationEvent{
NotificationEvent: state.NotificationEvent{
diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go
index 0eaa9f2853..39c8f62371 100644
--- a/pkg/morph/event/netmap/epoch.go
+++ b/pkg/morph/event/netmap/epoch.go
@@ -1,9 +1,7 @@
package netmap
import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -11,12 +9,12 @@ import (
// NewEpoch is a new epoch Neo:Morph event.
type NewEpoch struct {
- num uint64
+ Num uint64
- // txHash is used in notary environmental
+ // Hash is used in the notary environment
// for calculating unique but same for
// all notification receivers values.
- txHash util.Uint256
+ Hash util.Uint256
}
// MorphEvent implements Neo:Morph Event interface.
@@ -24,35 +22,26 @@ func (NewEpoch) MorphEvent() {}
// EpochNumber returns new epoch number.
func (s NewEpoch) EpochNumber() uint64 {
- return s.num
+ return s.Num
}
// TxHash returns hash of the TX with new epoch
// notification.
func (s NewEpoch) TxHash() util.Uint256 {
- return s.txHash
+ return s.Hash
}
// ParseNewEpoch is a parser of new epoch notification event.
//
// Result is type of NewEpoch.
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != 1 {
- return nil, event.WrongNumberOfParameters(1, ln)
- }
-
- prmEpochNum, err := client.IntFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get integer epoch number: %w", err)
+ var nee netmap.NewEpochEvent
+ if err := nee.FromStackItem(e.Item); err != nil {
+ return nil, err
}
return NewEpoch{
- num: uint64(prmEpochNum),
- txHash: e.Container,
+ Num: nee.Epoch.Uint64(),
+ Hash: e.Container,
}, nil
}
diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go
index b175b52756..6ff6923272 100644
--- a/pkg/morph/event/netmap/epoch_test.go
+++ b/pkg/morph/event/netmap/epoch_test.go
@@ -4,7 +4,6 @@ import (
"math/big"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +16,7 @@ func TestParseNewEpoch(t *testing.T) {
}
_, err := ParseNewEpoch(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ require.Error(t, err)
})
t.Run("wrong first parameter type", func(t *testing.T) {
@@ -37,7 +36,7 @@ func TestParseNewEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, NewEpoch{
- num: epochNum,
+ Num: epochNum,
}, ev)
})
}
diff --git a/pkg/morph/event/netmap/update_peer.go b/pkg/morph/event/netmap/update_peer.go
index 535d57e4d7..e296711314 100644
--- a/pkg/morph/event/netmap/update_peer.go
+++ b/pkg/morph/event/netmap/update_peer.go
@@ -1,25 +1,21 @@
package netmap
import (
- "crypto/elliptic"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/network/payload"
)
type UpdatePeer struct {
- publicKey *keys.PublicKey
+ PubKey *keys.PublicKey
- state netmap.NodeState
+ State netmap.NodeState
// For notary notifications only.
// Contains raw transactions of notary request.
- notaryRequest *payload.P2PNotaryRequest
+ Request *payload.P2PNotaryRequest
}
// MorphEvent implements Neo:Morph Event interface.
@@ -28,27 +24,27 @@ func (UpdatePeer) MorphEvent() {}
// Online returns true if node's state is requested to be switched
// to "online".
func (s UpdatePeer) Online() bool {
- return s.state == netmap.NodeStateOnline
+ return s.State == netmap.NodeStateOnline
}
// Maintenance returns true if node's state is requested to be switched
// to "maintenance".
func (s UpdatePeer) Maintenance() bool {
- return s.state == netmap.NodeStateMaintenance
+ return s.State == netmap.NodeStateMaintenance
}
func (s UpdatePeer) PublicKey() *keys.PublicKey {
- return s.publicKey
+ return s.PubKey
}
// NotaryRequest returns raw notary request if notification
// was received via notary service. Otherwise, returns nil.
func (s UpdatePeer) NotaryRequest() *payload.P2PNotaryRequest {
- return s.notaryRequest
+ return s.Request
}
func (s *UpdatePeer) decodeState(state int64) error {
- switch s.state = netmap.NodeState(state); s.state {
+ switch s.State = netmap.NodeState(state); s.State {
default:
return fmt.Errorf("unsupported node state %d", state)
case
@@ -60,43 +56,3 @@ func (s *UpdatePeer) decodeState(state int64) error {
}
const expectedItemNumUpdatePeer = 2
-
-func ParseUpdatePeer(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev UpdatePeer
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != expectedItemNumUpdatePeer {
- return nil, event.WrongNumberOfParameters(expectedItemNumUpdatePeer, ln)
- }
-
- // parse public key
- key, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get public key: %w", err)
- }
-
- ev.publicKey, err = keys.NewPublicKeyFromBytes(key, elliptic.P256())
- if err != nil {
- return nil, fmt.Errorf("could not parse public key: %w", err)
- }
-
- // parse node status
- st, err := client.IntFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get node status: %w", err)
- }
-
- err = ev.decodeState(st)
- if err != nil {
- return nil, err
- }
-
- return ev, nil
-}
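
With the notification-based ParseUpdatePeer removed, UpdatePeer events are
produced only by ParseUpdatePeerNotary, and consumers read the now-exported
fields through the unchanged accessors. A minimal sketch of a handler; the
package alias and the use of log are illustrative:

    package main

    import (
        "context"
        "log"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
        netmapev "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
    )

    func handleUpdatePeer(_ context.Context, e event.Event) {
        ev := e.(netmapev.UpdatePeer)
        switch {
        case ev.Maintenance():
            log.Println("maintenance requested for", ev.PublicKey().StringCompressed())
        case ev.Online():
            log.Println("online requested for", ev.PublicKey().StringCompressed())
        default:
            log.Println("other state change requested for", ev.PublicKey().StringCompressed())
        }
    }
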
diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go
index b7a251f98e..993182ab49 100644
--- a/pkg/morph/event/netmap/update_peer_notary.go
+++ b/pkg/morph/event/netmap/update_peer_notary.go
@@ -10,16 +10,16 @@ import (
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)
-var errNilPubKey = errors.New("could not parse public key: public key is nil")
+var errNilPubKey = errors.New("public key is nil")
func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
if v == nil {
return errNilPubKey
}
- s.publicKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256())
+ s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256())
if err != nil {
- return fmt.Errorf("could not parse public key: %w", err)
+ return fmt.Errorf("parse public key: %w", err)
}
return
@@ -73,7 +73,7 @@ func ParseUpdatePeerNotary(ne event.NotaryEvent) (event.Event, error) {
}
}
- ev.notaryRequest = ne.Raw()
+ ev.Request = ne.Raw()
return ev, nil
}
diff --git a/pkg/morph/event/netmap/update_peer_test.go b/pkg/morph/event/netmap/update_peer_test.go
deleted file mode 100644
index 1772c88a70..0000000000
--- a/pkg/morph/event/netmap/update_peer_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package netmap
-
-import (
- "math/big"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParseUpdatePeer(t *testing.T) {
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- publicKey := priv.PublicKey()
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- }
-
- _, err := ParseUpdatePeer(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
- })
-
- t.Run("wrong first parameter type", func(t *testing.T) {
- _, err := ParseUpdatePeer(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong second parameter type", func(t *testing.T) {
- _, err := ParseUpdatePeer(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(publicKey.Bytes()),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- const state = netmap.NodeStateMaintenance
- ev, err := ParseUpdatePeer(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewBigInteger(big.NewInt(int64(state))),
- stackitem.NewByteArray(publicKey.Bytes()),
- }))
- require.NoError(t, err)
-
- require.Equal(t, UpdatePeer{
- publicKey: publicKey,
- state: state,
- }, ev)
- })
-}
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index f661268f73..b119736465 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -39,11 +39,18 @@ var (
// ErrTXAlreadyHandled is returned if received TX has already been signed.
ErrTXAlreadyHandled = errors.New("received main tx has already been handled")
-
- // ErrMainTXExpired is returned if received fallback TX is already valid.
- ErrMainTXExpired = errors.New("received main tx has expired")
)
+// ExpiredTXError is returned if the main TX of a received notary request has
+// expired, i.e. its fallback TX has already become valid.
+type ExpiredTXError struct {
+ CurrentBlockHeight uint32
+ FallbackTXNotValidBeforeHeight uint32
+}
+
+func (e *ExpiredTXError) Error() string {
+ return "received main tx has expired"
+}
+
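
Replacing the sentinel error with a typed one lets callers both match it and
report the two heights. A minimal sketch of package-internal usage, assuming p
is a Preparator and nr a received request (the wrapper function is
hypothetical):

    func handleRequest(p Preparator, nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
        ev, err := p.Prepare(nr)

        var expired *ExpiredTXError
        if errors.As(err, &expired) {
            // the fallback TX became valid at FallbackTXNotValidBeforeHeight and
            // the chain has already reached CurrentBlockHeight: drop the main TX
            return nil, fmt.Errorf("drop expired main tx (current=%d, nvb=%d): %w",
                expired.CurrentBlockHeight, expired.FallbackTXNotValidBeforeHeight, err)
        }
        return ev, err
    }
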
// BlockCounter must return block count of the network
// from which notary requests are received.
type BlockCounter interface {
@@ -104,52 +111,7 @@ func notaryPreparator(prm PreparatorPrm) NotaryPreparator {
// from the Notary service but already signed. This happens
// since every notary call is a new notary request in fact.
func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
- // notary request's main tx is expected to have
- // three or four witnesses: one for proxy contract,
- // one for alphabet multisignature, one optional for
- // notary's invoker and one is for notary contract
- ln := len(nr.MainTransaction.Scripts)
- switch ln {
- case 3, 4:
- default:
- return nil, errUnexpectedWitnessAmount
- }
- invokerWitness := ln == 4
-
- // alphabet node should handle only notary requests
- // that have been sent unsigned(by storage nodes) =>
- // such main TXs should have dummy scripts as an
- // invocation script
- //
- // this check prevents notary flow recursion
- if !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) {
- return nil, ErrTXAlreadyHandled
- }
-
- currentAlphabet, err := p.alphaKeys()
- if err != nil {
- return nil, fmt.Errorf("could not fetch Alphabet public keys: %w", err)
- }
-
- err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
- if err != nil {
- return nil, err
- }
-
- // validate main TX's notary attribute
- err = p.validateAttributes(nr.MainTransaction.Attributes, currentAlphabet, invokerWitness)
- if err != nil {
- return nil, err
- }
-
- // validate main TX's witnesses
- err = p.validateWitnesses(nr.MainTransaction.Scripts, currentAlphabet, invokerWitness)
- if err != nil {
- return nil, err
- }
-
- // validate main TX expiration
- err = p.validateExpiration(nr.FallbackTransaction)
+ err := p.validateNotaryRequest(nr)
if err != nil {
return nil, err
}
@@ -165,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
for {
opCode, param, err = ctx.Next()
if err != nil {
- return nil, fmt.Errorf("could not get next opcode in script: %w", err)
+ return nil, fmt.Errorf("get next opcode in script: %w", err)
}
if opCode == opcode.RET {
@@ -185,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
// retrieve contract's script hash
contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param)
if err != nil {
- return nil, fmt.Errorf("could not decode contract hash: %w", err)
+ return nil, fmt.Errorf("decode contract hash: %w", err)
}
// retrieve contract's method
@@ -202,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
if len(args) != 0 {
err = p.validateParameterOpcodes(args)
if err != nil {
- return nil, fmt.Errorf("could not validate arguments: %w", err)
+ return nil, fmt.Errorf("validate arguments: %w", err)
}
// without args packing opcodes
@@ -217,6 +179,57 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
}, nil
}
+func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
+	// the notary request's main tx is expected to have
+	// three or four witnesses: one for the proxy contract,
+	// one for the alphabet multisignature, one optional
+	// for the notary's invoker, and one for the notary contract
+ ln := len(nr.MainTransaction.Scripts)
+ switch ln {
+ case 3, 4:
+ default:
+ return errUnexpectedWitnessAmount
+ }
+ invokerWitness := ln == 4
+
+	// an alphabet node should handle only notary requests whose inner ring
+	// multisignature has not been filled yet => such main TXs either have an
+	// empty invocation script in the inner ring witness (when a Notary Actor
+	// is used to create the request) or have it filled with dummy bytes (when
+	// the request was created manually with the old neo-go API)
+ //
+ // this check prevents notary flow recursion
+ if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 &&
+ !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version
+ return ErrTXAlreadyHandled
+ }
+
+ currentAlphabet, err := p.alphaKeys()
+ if err != nil {
+ return fmt.Errorf("fetch Alphabet public keys: %w", err)
+ }
+
+ err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
+ if err != nil {
+ return err
+ }
+
+ // validate main TX's notary attribute
+ err = p.validateAttributes(nr.MainTransaction.Attributes, currentAlphabet, invokerWitness)
+ if err != nil {
+ return err
+ }
+
+ // validate main TX's witnesses
+ err = p.validateWitnesses(nr.MainTransaction.Scripts, currentAlphabet, invokerWitness)
+ if err != nil {
+ return err
+ }
+
+ // validate main TX expiration
+ return p.validateExpiration(nr.FallbackTransaction)
+}
+
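
For reference, a sketch of the main-TX witness layout that the extracted
validateNotaryRequest accepts; alphaVerificationScript stands for the Alphabet
multisig script and is assumed to be built elsewhere:

    scripts := []transaction.Witness{
        {}, // [0] proxy contract witness
        {
            // [1] Alphabet multisig: invocation script is either empty
            // (Notary Actor flow) or the dummy PUSHDATA1 placeholder (old API)
            VerificationScript: alphaVerificationScript,
        },
        // [2] an optional invoker witness appears here in the 4-witness form
        {}, // [last] notary placeholder: empty or dummy invocation script,
            // and always an empty verification script
    }
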
func (p Preparator) validateParameterOpcodes(ops []Op) error {
l := len(ops)
@@ -226,15 +239,11 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error {
argsLen, err := IntFromOpcode(ops[l-2])
if err != nil {
- return fmt.Errorf("could not parse argument len: %w", err)
+ return fmt.Errorf("parse argument len: %w", err)
}
err = validateNestedArgs(argsLen, ops[:l-2])
- if err != nil {
- return err
- }
-
- return nil
+ return err
}
func validateNestedArgs(expArgLen int64, ops []Op) error {
@@ -264,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error {
argsLen, err := IntFromOpcode(ops[i-1])
if err != nil {
- return fmt.Errorf("could not parse argument len: %w", err)
+ return fmt.Errorf("parse argument len: %w", err)
}
expArgLen += argsLen + 1
@@ -298,11 +307,14 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error {
currBlock, err := p.blockCounter.BlockCount()
if err != nil {
- return fmt.Errorf("could not fetch current chain height: %w", err)
+ return fmt.Errorf("fetch current chain height: %w", err)
}
if currBlock >= nvb.Height {
- return ErrMainTXExpired
+ return &ExpiredTXError{
+ CurrentBlockHeight: currBlock,
+ FallbackTXNotValidBeforeHeight: nvb.Height,
+ }
}
return nil
@@ -315,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("could not get Alphabet verification script: %w", err)
+ return fmt.Errorf("get Alphabet verification script: %w", err)
}
if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) {
@@ -334,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("could not get Alphabet verification script: %w", err)
+ return fmt.Errorf("get Alphabet verification script: %w", err)
}
// the second one must be witness of the current
@@ -352,7 +364,9 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
// the last one must be a placeholder for notary contract witness
last := len(w) - 1
- if !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript) || len(w[last].VerificationScript) != 0 {
+ if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981
+ !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
+ len(w[last].VerificationScript) != 0 {
return errIncorrectNotaryPlaceholder
}
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index d0463348d3..60ddb46010 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -1,6 +1,7 @@
package event
import (
+ "fmt"
"testing"
"github.com/nspcc-dev/neo-go/pkg/vm"
@@ -24,8 +25,9 @@ var (
alphaKeys keys.PublicKeys
wrongAlphaKeys keys.PublicKeys
- dummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...)
- wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...)
+ dummyAlphabetInvocationScript []byte
+ dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually
+ wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...)
scriptHash util.Uint160
)
@@ -61,35 +63,37 @@ func TestPrepare_IncorrectScript(t *testing.T) {
},
)
- t.Run("not contract call", func(t *testing.T) {
- bw := io.NewBufBinWriter()
+ for _, dummyMultisig := range []bool{true, false} { // try both empty and dummy multisig/Notary invocation witness script
+ t.Run(fmt.Sprintf("not contract call, compat: %t", dummyMultisig), func(t *testing.T) {
+ bw := io.NewBufBinWriter()
- emit.Int(bw.BinWriter, 4)
- emit.String(bw.BinWriter, "test")
- emit.Bytes(bw.BinWriter, scriptHash.BytesBE())
- emit.Syscall(bw.BinWriter, interopnames.SystemContractCallNative) // any != interopnames.SystemContractCall
+ emit.Int(bw.BinWriter, 4)
+ emit.String(bw.BinWriter, "test")
+ emit.Bytes(bw.BinWriter, scriptHash.BytesBE())
+ emit.Syscall(bw.BinWriter, interopnames.SystemContractCallNative) // any != interopnames.SystemContractCall
- nr := correctNR(bw.Bytes(), false)
+ nr := correctNR(bw.Bytes(), dummyMultisig, false)
- _, err := preparator.Prepare(nr)
+ _, err := preparator.Prepare(nr)
- require.EqualError(t, err, errNotContractCall.Error())
- })
+ require.EqualError(t, err, errNotContractCall.Error())
+ })
- t.Run("incorrect ", func(t *testing.T) {
- bw := io.NewBufBinWriter()
+ t.Run(fmt.Sprintf("incorrect, compat: %t", dummyMultisig), func(t *testing.T) {
+ bw := io.NewBufBinWriter()
- emit.Int(bw.BinWriter, -1)
- emit.String(bw.BinWriter, "test")
- emit.Bytes(bw.BinWriter, scriptHash.BytesBE())
- emit.Syscall(bw.BinWriter, interopnames.SystemContractCall)
+ emit.Int(bw.BinWriter, -1)
+ emit.String(bw.BinWriter, "test")
+ emit.Bytes(bw.BinWriter, scriptHash.BytesBE())
+ emit.Syscall(bw.BinWriter, interopnames.SystemContractCall)
- nr := correctNR(bw.Bytes(), false)
+ nr := correctNR(bw.Bytes(), dummyMultisig, false)
- _, err := preparator.Prepare(nr)
+ _, err := preparator.Prepare(nr)
- require.EqualError(t, err, errIncorrectCallFlag.Error())
- })
+ require.EqualError(t, err, errIncorrectCallFlag.Error())
+ })
+ }
}
func TestPrepare_IncorrectNR(t *testing.T) {
@@ -209,7 +213,23 @@ func TestPrepare_IncorrectNR(t *testing.T) {
InvocationScript: make([]byte, 1),
},
{
- InvocationScript: dummyInvocationScript,
+ InvocationScript: dummyAlphabetInvocationScript,
+ },
+ {},
+ },
+ },
+ expErr: errIncorrectProxyWitnesses,
+ },
+ {
+ name: "incorrect main TX proxy witness compat",
+ addW: false,
+ mTX: mTX{
+ scripts: []transaction.Witness{
+ {
+ InvocationScript: make([]byte, 1),
+ },
+ {
+ InvocationScript: dummyAlphabetInvocationScriptOld,
},
{},
},
@@ -224,7 +244,22 @@ func TestPrepare_IncorrectNR(t *testing.T) {
{},
{
VerificationScript: wrongAlphaVerificationScript,
- InvocationScript: dummyInvocationScript,
+ InvocationScript: dummyAlphabetInvocationScript,
+ },
+ {},
+ },
+ },
+ expErr: errIncorrectAlphabet,
+ },
+ {
+ name: "incorrect main TX Alphabet witness compat",
+ addW: false,
+ mTX: mTX{
+ scripts: []transaction.Witness{
+ {},
+ {
+ VerificationScript: wrongAlphaVerificationScript,
+ InvocationScript: dummyAlphabetInvocationScriptOld,
},
{},
},
@@ -239,7 +274,24 @@ func TestPrepare_IncorrectNR(t *testing.T) {
{},
{
VerificationScript: alphaVerificationScript,
- InvocationScript: dummyInvocationScript,
+ InvocationScript: dummyAlphabetInvocationScript,
+ },
+ {
+ InvocationScript: wrongDummyInvocationScript,
+ },
+ },
+ },
+ expErr: errIncorrectNotaryPlaceholder,
+ },
+ {
+ name: "incorrect main TX Notary witness compat",
+ addW: false,
+ mTX: mTX{
+ scripts: []transaction.Witness{
+ {},
+ {
+ VerificationScript: alphaVerificationScript,
+ InvocationScript: dummyAlphabetInvocationScriptOld,
},
{
InvocationScript: wrongDummyInvocationScript,
@@ -279,7 +331,7 @@ func TestPrepare_IncorrectNR(t *testing.T) {
{},
},
},
- expErr: ErrMainTXExpired,
+ expErr: &ExpiredTXError{},
},
{
name: "incorrect invoker TX Alphabet witness",
@@ -289,7 +341,23 @@ func TestPrepare_IncorrectNR(t *testing.T) {
{},
{
VerificationScript: alphaVerificationScript,
- InvocationScript: dummyInvocationScript,
+ InvocationScript: dummyAlphabetInvocationScript,
+ },
+ {},
+ {},
+ },
+ },
+ expErr: errIncorrectInvokerWitnesses,
+ },
+ {
+ name: "incorrect invoker TX Alphabet witness compat",
+ addW: true,
+ mTX: mTX{
+ scripts: []transaction.Witness{
+ {},
+ {
+ VerificationScript: alphaVerificationScript,
+ InvocationScript: dummyAlphabetInvocationScriptOld,
},
{},
{},
@@ -327,7 +395,7 @@ func TestPrepare_IncorrectNR(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- correctNR := correctNR(nil, test.addW)
+ correctNR := correctNR(nil, false, test.addW)
incorrectNR = setIncorrectFields(*correctNR, test.mTX, test.fbTX)
_, err = preparator.Prepare(&incorrectNR)
@@ -371,41 +439,43 @@ func TestPrepare_CorrectNR(t *testing.T) {
)
for _, test := range tests {
- for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR
- additionalWitness := i == 0
- nr := correctNR(script(test.hash, test.method, test.args...), additionalWitness)
+	for i := range 2 { // run tests against both the 3- and 4-witness NR
+ for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness
+ additionalWitness := i == 0
+ nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness)
- event, err := preparator.Prepare(nr)
+ event, err := preparator.Prepare(nr)
- require.NoError(t, err)
- require.Equal(t, test.method, event.Type().String())
- require.Equal(t, test.hash.StringLE(), event.ScriptHash().StringLE())
-
- // check args parsing
- bw := io.NewBufBinWriter()
- emit.Array(bw.BinWriter, test.args...)
-
- ctx := vm.NewContext(bw.Bytes())
-
- opCode, param, err := ctx.Next()
- require.NoError(t, err)
-
- for _, opGot := range event.Params() {
- require.Equal(t, opCode, opGot.code)
- require.Equal(t, param, opGot.param)
-
- opCode, param, err = ctx.Next()
require.NoError(t, err)
+ require.Equal(t, test.method, event.Type().String())
+ require.Equal(t, test.hash.StringLE(), event.ScriptHash().StringLE())
+
+ // check args parsing
+ bw := io.NewBufBinWriter()
+ emit.Array(bw.BinWriter, test.args...)
+
+ ctx := vm.NewContext(bw.Bytes())
+
+ opCode, param, err := ctx.Next()
+ require.NoError(t, err)
+
+ for _, opGot := range event.Params() {
+ require.Equal(t, opCode, opGot.code)
+ require.Equal(t, param, opGot.param)
+
+ opCode, param, err = ctx.Next()
+ require.NoError(t, err)
+ }
+
+ _, _, err = ctx.Next() // PACK opcode
+ require.NoError(t, err)
+ _, _, err = ctx.Next() // packing len opcode
+ require.NoError(t, err)
+
+ opCode, _, err = ctx.Next()
+ require.NoError(t, err)
+ require.Equal(t, opcode.RET, opCode)
}
-
- _, _, err = ctx.Next() // PACK opcode
- require.NoError(t, err)
- _, _, err = ctx.Next() // packing len opcode
- require.NoError(t, err)
-
- opCode, _, err = ctx.Next()
- require.NoError(t, err)
- require.Equal(t, opcode.RET, opCode)
}
}
}
@@ -428,7 +498,7 @@ func script(hash util.Uint160, method string, args ...any) []byte {
return bw.Bytes()
}
-func correctNR(script []byte, additionalWitness bool) *payload.P2PNotaryRequest {
+func correctNR(script []byte, dummyMultisig, additionalWitness bool) *payload.P2PNotaryRequest {
alphaVerificationScript, _ := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
signers := []transaction.Signer{
@@ -443,20 +513,24 @@ func correctNR(script []byte, additionalWitness bool) *payload.P2PNotaryRequest
signers[2] = transaction.Signer{Account: hash.Hash160(alphaVerificationScript)}
}
+ multisigInv := dummyAlphabetInvocationScript
+ if dummyMultisig {
+ multisigInv = dummyAlphabetInvocationScriptOld
+ }
scripts := []transaction.Witness{
{},
{
- InvocationScript: dummyInvocationScript,
+ InvocationScript: multisigInv,
VerificationScript: alphaVerificationScript,
},
{
- InvocationScript: dummyInvocationScript,
+ InvocationScript: multisigInv,
},
}
	if additionalWitness { // insert an element at index 2
scripts = append(scripts[:2+1], scripts[2:]...)
scripts[2] = transaction.Witness{
- InvocationScript: dummyInvocationScript,
+ InvocationScript: multisigInv,
VerificationScript: alphaVerificationScript,
}
}
diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go
index 90eff0bd22..5adeb4b30f 100644
--- a/pkg/morph/event/parsers.go
+++ b/pkg/morph/event/parsers.go
@@ -11,15 +11,6 @@ import (
// from the StackItem list.
type NotificationParser func(*state.ContainedNotificationEvent) (Event, error)
-// NotificationParserInfo is a structure that groups
-// the parameters of particular contract
-// notification event parser.
-type NotificationParserInfo struct {
- scriptHashWithType
-
- p NotificationParser
-}
-
// NotaryPreparator constructs NotaryEvent
// from the NotaryRequest event.
type NotaryPreparator interface {
@@ -47,24 +38,6 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) {
n.p = p
}
-// SetParser is an event parser setter.
-func (s *NotificationParserInfo) SetParser(v NotificationParser) {
- s.p = v
-}
-
-func (s NotificationParserInfo) parser() NotificationParser {
- return s.p
-}
-
-// SetType is an event type setter.
-func (s *NotificationParserInfo) SetType(v Type) {
- s.typ = v
-}
-
-func (s NotificationParserInfo) getType() Type {
- return s.typ
-}
-
type wrongPrmNumber struct {
exp, act int
}
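
With the getter/setter wrapper gone, notification parsers can be keyed
directly by the exported-field struct kept in utils.go. A package-internal
sketch, assuming Type remains a string-based type; the map, hash, and handler
are illustrative rather than the listener's actual storage:

    parsers := map[scriptHashWithType]NotificationParser{
        {Hash: netmapHash, Type: "NewEpoch"}: parseNewEpoch,
    }

    if p, ok := parsers[scriptHashWithType{Hash: ev.ScriptHash, Type: Type(ev.Name)}]; ok {
        parsed, err := p(ev)
        if err == nil {
            handle(parsed) // placeholder for the actual dispatch
        }
    }
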
diff --git a/pkg/morph/event/reputation/put.go b/pkg/morph/event/reputation/put.go
deleted file mode 100644
index a182bf26c3..0000000000
--- a/pkg/morph/event/reputation/put.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/network/payload"
-)
-
-// Put structure of reputation.reputationPut notification from
-// morph chain.
-type Put struct {
- epoch uint64
- peerID reputation.PeerID
- value reputation.GlobalTrust
-
- // For notary notifications only.
- // Contains raw transactions of notary request.
- notaryRequest *payload.P2PNotaryRequest
-}
-
-const peerIDLength = 33 // compressed public key
-
-// MorphEvent implements Neo:Morph Event interface.
-func (Put) MorphEvent() {}
-
-// Epoch returns epoch value of reputation data.
-func (p Put) Epoch() uint64 {
- return p.epoch
-}
-
-// PeerID returns peer id of reputation data.
-func (p Put) PeerID() reputation.PeerID {
- return p.peerID
-}
-
-// Value returns reputation structure.
-func (p Put) Value() reputation.GlobalTrust {
- return p.value
-}
-
-// NotaryRequest returns raw notary request if notification
-// was received via notary service. Otherwise, returns nil.
-func (p Put) NotaryRequest() *payload.P2PNotaryRequest {
- return p.notaryRequest
-}
-
-// ParsePut from notification into reputation event structure.
-func ParsePut(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Put
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse epoch number
- epoch, err := client.IntFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get integer epoch number: %w", err)
- }
-
- ev.epoch = uint64(epoch)
-
- // parse peer ID value
- peerID, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get peer ID value: %w", err)
- }
-
- if ln := len(peerID); ln != peerIDLength {
- return nil, fmt.Errorf("peer ID is %d byte long, expected %d", ln, peerIDLength)
- }
-
- ev.peerID.SetPublicKey(peerID)
-
- // parse global trust value
- rawValue, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get global trust value: %w", err)
- }
-
- err = ev.value.Unmarshal(rawValue)
- if err != nil {
- return nil, fmt.Errorf("could not parse global trust value: %w", err)
- }
-
- return ev, nil
-}
diff --git a/pkg/morph/event/reputation/put_notary.go b/pkg/morph/event/reputation/put_notary.go
deleted file mode 100644
index f3cd749fc3..0000000000
--- a/pkg/morph/event/reputation/put_notary.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-)
-
-func (p *Put) setEpoch(v uint64) {
- p.epoch = v
-}
-
-func (p *Put) setPeerID(v []byte) error {
- if ln := len(v); ln != peerIDLength {
- return fmt.Errorf("peer ID is %d byte long, expected %d", ln, peerIDLength)
- }
-
- p.peerID.SetPublicKey(v)
-
- return nil
-}
-
-func (p *Put) setValue(v []byte) error {
- return p.value.Unmarshal(v)
-}
-
-var fieldSetters = []func(*Put, []byte) error{
- // order on stack is reversed
- (*Put).setValue,
- (*Put).setPeerID,
-}
-
-const (
- // PutNotaryEvent is method name for reputation put operations
- // in `Reputation` contract. Is used as identifier for notary
- // put reputation requests.
- PutNotaryEvent = "put"
-)
-
-// ParsePutNotary from NotaryEvent into reputation event structure.
-func ParsePutNotary(ne event.NotaryEvent) (event.Event, error) {
- var ev Put
-
- fieldNum := 0
-
- for _, op := range ne.Params() {
- switch fieldNum {
- case 0, 1:
- data, err := event.BytesFromOpcode(op)
- if err != nil {
- return nil, err
- }
-
- err = fieldSetters[fieldNum](&ev, data)
- if err != nil {
- return nil, fmt.Errorf("can't parse field num %d: %w", fieldNum, err)
- }
- case 2:
- n, err := event.IntFromOpcode(op)
- if err != nil {
- return nil, err
- }
-
- ev.setEpoch(uint64(n))
- default:
- return nil, event.UnexpectedArgNumErr(PutNotaryEvent)
- }
- fieldNum++
- }
-
- ev.notaryRequest = ne.Raw()
-
- return ev, nil
-}
diff --git a/pkg/morph/event/reputation/put_test.go b/pkg/morph/event/reputation/put_test.go
deleted file mode 100644
index 46356b317e..0000000000
--- a/pkg/morph/event/reputation/put_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package reputation
-
-import (
- "math/big"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- reputationtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation/test"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParsePut(t *testing.T) {
- var (
- peerID = reputationtest.PeerID()
-
- value reputation.GlobalTrust
- trust reputation.Trust
- trustValue float64 = 0.64
-
- epoch uint64 = 42
- )
-
- trust.SetValue(trustValue)
- trust.SetPeer(peerID)
-
- value.SetTrust(trust)
-
- rawValue := value.Marshal()
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParsePut(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
- })
-
- t.Run("wrong epoch parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong peerID parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong value parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)),
- stackitem.NewByteArray(peerID.PublicKey()),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)),
- stackitem.NewByteArray(peerID.PublicKey()),
- stackitem.NewByteArray(rawValue),
- }))
- require.NoError(t, err)
-
- require.Equal(t, Put{
- epoch: epoch,
- peerID: peerID,
- value: value,
- }, ev)
- })
-}
-
-func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
- return &state.ContainedNotificationEvent{
- NotificationEvent: state.NotificationEvent{
- Item: stackitem.NewArray(items),
- },
- }
-}
diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go
index 28c9680462..b384e436b6 100644
--- a/pkg/morph/event/rolemanagement/designate.go
+++ b/pkg/morph/event/rolemanagement/designate.go
@@ -26,7 +26,7 @@ func (Designate) MorphEvent() {}
func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) {
params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("parse stack items from notify event: %w", err)
}
if len(params) != 2 {
diff --git a/pkg/morph/event/subnet/delete.go b/pkg/morph/event/subnet/delete.go
deleted file mode 100644
index f46658b586..0000000000
--- a/pkg/morph/event/subnet/delete.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package subnetevents
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// Delete structures information about the notification generated by Delete method of Subnet contract.
-type Delete struct {
- txHash util.Uint256
-
- id []byte
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (Delete) MorphEvent() {}
-
-// ID returns identifier of the removed subnet in a binary format of NeoFS API protocol.
-func (x Delete) ID() []byte {
- return x.id
-}
-
-// TxHash returns hash of the transaction which thrown the notification event.
-// Makes sense only in notary environments.
-func (x Delete) TxHash() util.Uint256 {
- return x.txHash
-}
-
-// ParseDelete parses the notification about the removal of a subnet which has been thrown
-// by the appropriate method of the Subnet contract.
-//
-// Resulting event is of Delete type.
-func ParseDelete(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Delete
- err error
- )
-
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array: %w", err)
- }
-
- const itemNumDelete = 1
-
- if ln := len(items); ln != itemNumDelete {
- return nil, event.WrongNumberOfParameters(itemNumDelete, ln)
- }
-
- // parse ID
- ev.id, err = client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("id item: %w", err)
- }
-
- ev.txHash = e.Container
-
- return ev, nil
-}
diff --git a/pkg/morph/event/subnet/delete_test.go b/pkg/morph/event/subnet/delete_test.go
deleted file mode 100644
index fc68bb227c..0000000000
--- a/pkg/morph/event/subnet/delete_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package subnetevents_test
-
-import (
- "testing"
-
- subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParseDelete(t *testing.T) {
- id := []byte("id")
-
- t.Run("wrong number of items", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewByteArray(nil),
- stackitem.NewByteArray(nil),
- }
-
- _, err := subnetevents.ParseDelete(createNotifyEventFromItems(prms))
- require.Error(t, err)
- })
-
- t.Run("wrong id item", func(t *testing.T) {
- _, err := subnetevents.ParseDelete(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := subnetevents.ParseDelete(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(id),
- }))
- require.NoError(t, err)
-
- v := ev.(subnetevents.Delete)
-
- require.Equal(t, id, v.ID())
- })
-}
diff --git a/pkg/morph/event/subnet/put.go b/pkg/morph/event/subnet/put.go
deleted file mode 100644
index 3b1f5297d5..0000000000
--- a/pkg/morph/event/subnet/put.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package subnetevents
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/network/payload"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// Put structures information about the notification generated by Put method of Subnet contract.
-type Put struct {
- notaryRequest *payload.P2PNotaryRequest
-
- txHash util.Uint256
-
- id []byte
-
- owner []byte
-
- info []byte
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (Put) MorphEvent() {}
-
-// ID returns identifier of the creating subnet in a binary format of FrostFS API protocol.
-func (x Put) ID() []byte {
- return x.id
-}
-
-// Owner returns subnet owner's public key in a binary format.
-func (x Put) Owner() []byte {
- return x.owner
-}
-
-// Info returns information about the subnet in a binary format of FrostFS API protocol.
-func (x Put) Info() []byte {
- return x.info
-}
-
-// TxHash returns hash of the transaction which thrown the notification event.
-// Makes sense only in notary environments.
-func (x Put) TxHash() util.Uint256 {
- return x.txHash
-}
-
-// NotaryMainTx returns main transaction of the request in the Notary service.
-// Returns nil in non-notary environments.
-func (x Put) NotaryMainTx() *transaction.Transaction {
- if x.notaryRequest != nil {
- return x.notaryRequest.MainTransaction
- }
-
- return nil
-}
-
-// number of items in notification about subnet creation.
-const itemNumPut = 3
-
-// ParsePut parses the notification about the creation of a subnet which has been thrown
-// by the appropriate method of the subnet contract.
-//
-// Resulting event is of Put type.
-func ParsePut(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- put Put
- err error
- )
-
- items, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("parse stack array: %w", err)
- }
-
- if ln := len(items); ln != itemNumPut {
- return nil, event.WrongNumberOfParameters(itemNumPut, ln)
- }
-
- // parse ID
- put.id, err = client.BytesFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("id item: %w", err)
- }
-
- // parse owner
- put.owner, err = client.BytesFromStackItem(items[1])
- if err != nil {
- return nil, fmt.Errorf("owner item: %w", err)
- }
-
- // parse info about subnet
- put.info, err = client.BytesFromStackItem(items[2])
- if err != nil {
- return nil, fmt.Errorf("info item: %w", err)
- }
-
- put.txHash = e.Container
-
- return put, nil
-}
-
-// ParseNotaryPut parses the notary notification about the creation of a subnet which has been
-// thrown by the appropriate method of the subnet contract.
-//
-// Resulting event is of Put type.
-func ParseNotaryPut(e event.NotaryEvent) (event.Event, error) {
- var put Put
-
- put.notaryRequest = e.Raw()
- if put.notaryRequest == nil {
- panic(fmt.Sprintf("nil %T in notary environment", put.notaryRequest))
- }
-
- var (
- err error
-
- prms = e.Params()
- )
-
- if ln := len(prms); ln != itemNumPut {
- return nil, event.WrongNumberOfParameters(itemNumPut, ln)
- }
-
- // parse info about subnet
- put.info, err = event.BytesFromOpcode(prms[0])
- if err != nil {
- return nil, fmt.Errorf("info param: %w", err)
- }
-
- // parse owner
- put.owner, err = event.BytesFromOpcode(prms[1])
- if err != nil {
- return nil, fmt.Errorf("creator param: %w", err)
- }
-
- // parse ID
- put.id, err = event.BytesFromOpcode(prms[2])
- if err != nil {
- return nil, fmt.Errorf("id param: %w", err)
- }
-
- return put, nil
-}
diff --git a/pkg/morph/event/subnet/put_test.go b/pkg/morph/event/subnet/put_test.go
deleted file mode 100644
index 8a75b62c86..0000000000
--- a/pkg/morph/event/subnet/put_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package subnetevents_test
-
-import (
- "testing"
-
- subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParsePut(t *testing.T) {
- var (
- id = []byte("id")
- owner = []byte("owner")
- info = []byte("info")
- )
-
- t.Run("wrong number of items", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewByteArray(nil),
- stackitem.NewByteArray(nil),
- }
-
- _, err := subnetevents.ParsePut(createNotifyEventFromItems(prms))
- require.Error(t, err)
- })
-
- t.Run("wrong id item", func(t *testing.T) {
- _, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong owner item", func(t *testing.T) {
- _, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(id),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong info item", func(t *testing.T) {
- _, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(id),
- stackitem.NewByteArray(owner),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(id),
- stackitem.NewByteArray(owner),
- stackitem.NewByteArray(info),
- }))
- require.NoError(t, err)
-
- v := ev.(subnetevents.Put)
-
- require.Equal(t, id, v.ID())
- require.Equal(t, owner, v.Owner())
- require.Equal(t, info, v.Info())
- })
-}
diff --git a/pkg/morph/event/subnet/remove_node.go b/pkg/morph/event/subnet/remove_node.go
deleted file mode 100644
index 67bfb8918a..0000000000
--- a/pkg/morph/event/subnet/remove_node.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package subnetevents
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// RemoveNode structure of subnet.RemoveNode notification from morph chain.
-type RemoveNode struct {
- subnetID []byte
- nodeKey []byte
-
- // txHash is used in notary environmental
- // for calculating unique but same for
- // all notification receivers values.
- txHash util.Uint256
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (RemoveNode) MorphEvent() {}
-
-// SubnetworkID returns a marshalled subnetID structure, defined in API.
-func (rn RemoveNode) SubnetworkID() []byte { return rn.subnetID }
-
-// Node is public key of the nodeKey that is being deleted.
-func (rn RemoveNode) Node() []byte { return rn.nodeKey }
-
-// TxHash returns hash of the TX with RemoveNode
-// notification.
-func (rn RemoveNode) TxHash() util.Uint256 { return rn.txHash }
-
-const expectedItemNumRemoveNode = 2
-
-// ParseRemoveNode parses notification into subnet event structure.
-//
-// Expects 2 stack items.
-func ParseRemoveNode(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev RemoveNode
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != expectedItemNumRemoveNode {
- return nil, event.WrongNumberOfParameters(expectedItemNumRemoveNode, ln)
- }
-
- ev.subnetID, err = client.BytesFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get raw subnetID: %w", err)
- }
-
- ev.nodeKey, err = client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get raw public key of the node: %w", err)
- }
-
- ev.txHash = e.Container
-
- return ev, nil
-}
diff --git a/pkg/morph/event/subnet/remove_node_test.go b/pkg/morph/event/subnet/remove_node_test.go
deleted file mode 100644
index 70fff4dc83..0000000000
--- a/pkg/morph/event/subnet/remove_node_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package subnetevents_test
-
-import (
- "testing"
-
- . "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
- subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParseRemoveNode(t *testing.T) {
- t.Run("wrong number of arguments", func(t *testing.T) {
- _, err := ParseRemoveNode(createNotifyEventFromItems([]stackitem.Item{}))
- require.Error(t, err)
- })
-
- t.Run("invalid item type", func(t *testing.T) {
- args := []stackitem.Item{stackitem.NewMap(), stackitem.Make(123)}
- _, err := ParseRemoveNode(createNotifyEventFromItems(args))
- require.Error(t, err)
- })
-
- subnetID := subnetid.ID{}
- subnetID.SetNumeric(123)
-
- rawSubnetID := subnetID.Marshal()
-
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- pub := priv.PublicKey()
-
- t.Run("good", func(t *testing.T) {
- args := []stackitem.Item{stackitem.NewByteArray(rawSubnetID), stackitem.Make(pub.Bytes())}
-
- e, err := ParseRemoveNode(createNotifyEventFromItems(args))
- require.NoError(t, err)
-
- gotRaw := e.(RemoveNode).SubnetworkID()
- require.NoError(t, err)
-
- require.Equal(t, rawSubnetID, gotRaw)
- require.Equal(t, pub.Bytes(), e.(RemoveNode).Node())
- })
-}
-
-func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
- return &state.ContainedNotificationEvent{
- NotificationEvent: state.NotificationEvent{
- Item: stackitem.NewArray(items),
- },
- }
-}
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index 355fd5b4da..0088be4001 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -1,9 +1,11 @@
package event
import (
+ "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
util2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -18,13 +20,9 @@ type scriptHashValue struct {
hash util.Uint160
}
-type typeValue struct {
- typ Type
-}
-
type scriptHashWithType struct {
- scriptHashValue
- typeValue
+ Hash util.Uint160
+ Type Type
}
type notaryRequestTypes struct {
@@ -71,26 +69,15 @@ func (s scriptHashValue) ScriptHash() util.Uint160 {
return s.hash
}
-// SetType is an event type setter.
-func (s *typeValue) SetType(v Type) {
- s.typ = v
-}
-
-// GetType is an event type getter.
-func (s typeValue) GetType() Type {
- return s.typ
-}
-
// WorkerPoolHandler sets closure over worker pool w with passed handler h.
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler {
- return func(e Event) {
+ return func(ctx context.Context, e Event) {
err := w.Submit(func() {
- h(e)
+ h(ctx, e)
})
-
if err != nil {
- log.Warn("could not Submit handler to worker pool",
- zap.String("error", err.Error()),
+ log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool,
+ zap.Error(err),
)
}
}
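
Handlers are now context-aware, so cancellation reaches work submitted to the
pool. A minimal sketch; pool, log, process, and someEvent are assumed to exist:

    h := event.WorkerPoolHandler(pool, func(ctx context.Context, e event.Event) {
        process(ctx, e) // inherits cancellation from the listener's context
    }, log)

    h(ctx, someEvent) // the closure is handed to the worker pool
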
diff --git a/pkg/morph/event/utils_test.go b/pkg/morph/event/utils_test.go
new file mode 100644
index 0000000000..83facc6530
--- /dev/null
+++ b/pkg/morph/event/utils_test.go
@@ -0,0 +1,48 @@
+package event
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseStackArray(t *testing.T) {
+ t.Run("success", func(t *testing.T) {
+ arr := &stackitem.Array{}
+ arr.Append(stackitem.NewBigInteger(big.NewInt(1)))
+ arr.Append(stackitem.NewBigInteger(big.NewInt(2)))
+ ev := &state.ContainedNotificationEvent{
+ Container: util.Uint256{67},
+ NotificationEvent: state.NotificationEvent{
+ ScriptHash: util.Uint160{69},
+ Name: "name",
+ Item: arr,
+ },
+ }
+
+ items, err := ParseStackArray(ev)
+ require.NoError(t, err, "failed to parse event items")
+ require.Equal(t, 2, len(items), "invalid length")
+ require.Equal(t, stackitem.NewBigInteger(big.NewInt(1)), items[0], "invalid item 0")
+		require.Equal(t, stackitem.NewBigInteger(big.NewInt(2)), items[1], "invalid item 1")
+ })
+ t.Run("empty stack error", func(t *testing.T) {
+ arr := &stackitem.Array{}
+ ev := &state.ContainedNotificationEvent{
+ Container: util.Uint256{67},
+ NotificationEvent: state.NotificationEvent{
+ ScriptHash: util.Uint160{69},
+ Name: "name",
+ Item: arr,
+ },
+ }
+
+ items, err := ParseStackArray(ev)
+ require.ErrorIs(t, err, errEmptyStackArray, "invalid empty array error")
+		require.Equal(t, 0, len(items), "unexpected items were returned")
+ })
+}
diff --git a/pkg/morph/metrics/metrics.go b/pkg/morph/metrics/metrics.go
new file mode 100644
index 0000000000..5d74b054d3
--- /dev/null
+++ b/pkg/morph/metrics/metrics.go
@@ -0,0 +1,21 @@
+package metrics
+
+import "time"
+
+type Register interface {
+ IncSwitchCount()
+ SetLastBlock(uint32)
+ IncNotificationCount(notificationType string)
+ ObserveInvoke(typ string, contract string, method string, success bool, d time.Duration)
+}
+
+type NoopRegister struct{}
+
+func (NoopRegister) IncSwitchCount() {}
+func (NoopRegister) SetLastBlock(uint32) {}
+func (NoopRegister) IncNotificationCount(string) {}
+func (NoopRegister) ObserveInvoke(string, string, string, bool, time.Duration) {}
+
+type NoopMorphCacheMetrics struct{}
+
+func (m *NoopMorphCacheMetrics) AddMethodDuration(string, bool, time.Duration) {}
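
Any collector satisfying Register can replace the no-op. A minimal counting
implementation for tests might look like this (names are illustrative;
requires the sync, sync/atomic, and time imports):

    type countingRegister struct {
        switches  atomic.Uint64
        lastBlock atomic.Uint32
        notified  sync.Map // notification type -> *atomic.Uint64
    }

    func (r *countingRegister) IncSwitchCount()       { r.switches.Add(1) }
    func (r *countingRegister) SetLastBlock(h uint32) { r.lastBlock.Store(h) }

    func (r *countingRegister) IncNotificationCount(typ string) {
        v, _ := r.notified.LoadOrStore(typ, new(atomic.Uint64))
        v.(*atomic.Uint64).Add(1)
    }

    func (r *countingRegister) ObserveInvoke(string, string, string, bool, time.Duration) {}
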
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index 6229e6f308..4ef59ed6ad 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -6,36 +6,55 @@ import (
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/neorpc"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
type (
+ NotificationChannels struct {
+ BlockCh <-chan *block.Block
+ NotificationsCh <-chan *state.ContainedNotificationEvent
+ NotaryRequestsCh <-chan *result.NotaryRequestEvent
+ }
+
// Subscriber is an interface of the NotificationEvent listener.
Subscriber interface {
- SubscribeForNotification(...util.Uint160) (<-chan *state.ContainedNotificationEvent, error)
- UnsubscribeForNotification()
- BlockNotifications() (<-chan *block.Block, error)
- SubscribeForNotaryRequests(mainTXSigner util.Uint160) (<-chan *result.NotaryRequestEvent, error)
+ SubscribeForNotification(...util.Uint160) error
+ BlockNotifications() error
+ SubscribeForNotaryRequests(mainTXSigner util.Uint160) error
+
+ NotificationChannels() NotificationChannels
+
Close()
}
+ subChannels struct {
+ NotifyChan chan *state.ContainedNotificationEvent
+ BlockChan chan *block.Block
+ NotaryChan chan *result.NotaryRequestEvent
+ }
+
subscriber struct {
- *sync.RWMutex
+ sync.RWMutex
log *logger.Logger
client *client.Client
notifyChan chan *state.ContainedNotificationEvent
-
- blockChan chan *block.Block
-
+ blockChan chan *block.Block
notaryChan chan *result.NotaryRequestEvent
+
+ current subChannels
+
+ // cached subscription information
+ subscribedEvents map[util.Uint160]bool
+ subscribedNotaryEvents map[util.Uint160]bool
+ subscribedToNewBlocks bool
}
// Params is a group of Subscriber constructor parameters.
@@ -46,6 +65,14 @@ type (
}
)
+func (s *subscriber) NotificationChannels() NotificationChannels {
+ return NotificationChannels{
+ BlockCh: s.blockChan,
+ NotificationsCh: s.notifyChan,
+ NotaryRequestsCh: s.notaryChan,
+ }
+}
+
var (
errNilParams = errors.New("chain/subscriber: config was not provided to the constructor")
@@ -54,118 +81,68 @@ var (
errNilClient = errors.New("chain/subscriber: client was not provided to the constructor")
)
-func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) (<-chan *state.ContainedNotificationEvent, error) {
+func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) error {
s.Lock()
defer s.Unlock()
- notifyIDs := make(map[util.Uint160]struct{}, len(contracts))
+ notifyIDs := make([]string, 0, len(contracts))
for i := range contracts {
+ if s.subscribedEvents[contracts[i]] {
+ continue
+ }
// subscribe to contract notifications
- err := s.client.SubscribeForExecutionNotifications(contracts[i])
+ id, err := s.client.ReceiveExecutionNotifications(contracts[i], s.current.NotifyChan)
if err != nil {
// if there is some error, undo all subscriptions and return error
- for hash := range notifyIDs {
- _ = s.client.UnsubscribeContract(hash)
+ for _, id := range notifyIDs {
+ _ = s.client.Unsubscribe(id)
}
- return nil, err
+ return err
}
// save notification id
- notifyIDs[contracts[i]] = struct{}{}
+ notifyIDs = append(notifyIDs, id)
+ }
+ for i := range contracts {
+ s.subscribedEvents[contracts[i]] = true
}
- return s.notifyChan, nil
-}
-
-func (s *subscriber) UnsubscribeForNotification() {
- err := s.client.UnsubscribeAll()
- if err != nil {
- s.log.Error("unsubscribe for notification",
- zap.Error(err))
- }
+ return nil
}
func (s *subscriber) Close() {
s.client.Close()
}
-func (s *subscriber) BlockNotifications() (<-chan *block.Block, error) {
- if err := s.client.SubscribeForNewBlocks(); err != nil {
- return nil, fmt.Errorf("could not subscribe for new block events: %w", err)
+func (s *subscriber) BlockNotifications() error {
+ s.Lock()
+ defer s.Unlock()
+ if s.subscribedToNewBlocks {
+ return nil
+ }
+ if _, err := s.client.ReceiveBlocks(s.current.BlockChan); err != nil {
+ return fmt.Errorf("could not subscribe for new block events: %w", err)
}
- return s.blockChan, nil
+ s.subscribedToNewBlocks = true
+
+ return nil
}
-func (s *subscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) (<-chan *result.NotaryRequestEvent, error) {
- if err := s.client.SubscribeForNotaryRequests(mainTXSigner); err != nil {
- return nil, fmt.Errorf("could not subscribe for notary request events: %w", err)
+func (s *subscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error {
+ s.Lock()
+ defer s.Unlock()
+ if s.subscribedNotaryEvents[mainTXSigner] {
+ return nil
+ }
+ if _, err := s.client.ReceiveNotaryRequests(mainTXSigner, s.current.NotaryChan); err != nil {
+ return fmt.Errorf("could not subscribe for notary request events: %w", err)
}
- return s.notaryChan, nil
-}
-
-func (s *subscriber) routeNotifications(ctx context.Context) {
- notificationChan := s.client.NotificationChannel()
-
- for {
- select {
- case <-ctx.Done():
- return
- case notification, ok := <-notificationChan:
- if !ok {
- s.log.Warn("remote notification channel has been closed")
- close(s.notifyChan)
- close(s.blockChan)
- close(s.notaryChan)
-
- return
- }
-
- switch notification.Type {
- case neorpc.NotificationEventID:
- notifyEvent, ok := notification.Value.(*state.ContainedNotificationEvent)
- if !ok {
- s.log.Error("can't cast notify event value to the notify struct",
- zap.String("received type", fmt.Sprintf("%T", notification.Value)),
- )
- continue
- }
-
- s.log.Debug("new notification event from sidechain",
- zap.String("name", notifyEvent.Name),
- )
-
- s.notifyChan <- notifyEvent
- case neorpc.BlockEventID:
- b, ok := notification.Value.(*block.Block)
- if !ok {
- s.log.Error("can't cast block event value to block",
- zap.String("received type", fmt.Sprintf("%T", notification.Value)),
- )
- continue
- }
-
- s.blockChan <- b
- case neorpc.NotaryRequestEventID:
- notaryRequest, ok := notification.Value.(*result.NotaryRequestEvent)
- if !ok {
- s.log.Error("can't cast notify event value to the notary request struct",
- zap.String("received type", fmt.Sprintf("%T", notification.Value)),
- )
- continue
- }
-
- s.notaryChan <- notaryRequest
- default:
- s.log.Debug("unsupported notification from the chain",
- zap.Uint8("type", uint8(notification.Type)),
- )
- }
- }
- }
+ s.subscribedNotaryEvents[mainTXSigner] = true
+ return nil
}
// New constructs a Neo:Morph event listener and returns the Subscriber interface.
@@ -185,22 +162,173 @@ func New(ctx context.Context, p *Params) (Subscriber, error) {
}
sub := &subscriber{
- RWMutex: new(sync.RWMutex),
log: p.Log,
client: p.Client,
notifyChan: make(chan *state.ContainedNotificationEvent),
blockChan: make(chan *block.Block),
notaryChan: make(chan *result.NotaryRequestEvent),
- }
- // Worker listens all events from neo-go websocket and puts them
- // into corresponding channel. It may be notifications, transactions,
- // new blocks. For now only notifications.
+ current: newSubChannels(),
+
+ subscribedEvents: make(map[util.Uint160]bool),
+ subscribedNotaryEvents: make(map[util.Uint160]bool),
+ }
+	// The worker listens to all events from the temporary NeoGo channels and
+	// routes them into the corresponding permanent channels.
go sub.routeNotifications(ctx)
return sub, nil
}
+func (s *subscriber) routeNotifications(ctx context.Context) {
+ var (
+ restoreCh = make(chan bool)
+ restoreInProgress bool
+ )
+
+routeloop:
+ for {
+ var connLost bool
+ s.RLock()
+ curr := s.current
+ s.RUnlock()
+ select {
+ case <-ctx.Done():
+ break routeloop
+ case ev, ok := <-curr.NotifyChan:
+ if ok {
+ s.client.Metrics().IncNotificationCount("notify")
+ s.notifyChan <- ev
+ } else {
+ connLost = true
+ }
+ case ev, ok := <-curr.BlockChan:
+ if ok {
+ s.client.Metrics().IncNotificationCount("block")
+ s.client.Metrics().SetLastBlock(ev.Index)
+ s.blockChan <- ev
+ } else {
+ connLost = true
+ }
+ case ev, ok := <-curr.NotaryChan:
+ if ok {
+ s.client.Metrics().IncNotificationCount("notary")
+ s.notaryChan <- ev
+ } else {
+ connLost = true
+ }
+ case ok := <-restoreCh:
+ restoreInProgress = false
+ if !ok {
+ connLost = true
+ }
+ }
+ if connLost {
+ if !restoreInProgress {
+ restoreInProgress = s.switchEndpoint(ctx, restoreCh)
+ if !restoreInProgress {
+ break routeloop
+ }
+ curr.drain()
+ } else { // Avoid getting additional !ok events.
+ s.Lock()
+ s.current.NotifyChan = nil
+ s.current.BlockChan = nil
+ s.current.NotaryChan = nil
+ s.Unlock()
+ }
+ }
+ }
+ close(s.notifyChan)
+ close(s.blockChan)
+ close(s.notaryChan)
+}
+
+func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool {
+ s.log.Info(ctx, logs.RPConnectionLost)
+ if !s.client.SwitchRPC(ctx) {
+ s.log.Error(ctx, logs.RPCNodeSwitchFailure)
+ return false
+ }
+
+ s.Lock()
+ chs := newSubChannels()
+ go func() {
+ finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
+ }()
+ s.current = chs
+ s.Unlock()
+
+ s.client.Metrics().IncSwitchCount()
+ return true
+}
+
+func newSubChannels() subChannels {
+ return subChannels{
+ NotifyChan: make(chan *state.ContainedNotificationEvent),
+ BlockChan: make(chan *block.Block),
+ NotaryChan: make(chan *result.NotaryRequestEvent),
+ }
+}
+
+func (s *subChannels) drain() {
+drainloop:
+ for {
+ select {
+ case _, ok := <-s.NotifyChan:
+ if !ok {
+ s.NotifyChan = nil
+ }
+ case _, ok := <-s.BlockChan:
+ if !ok {
+ s.BlockChan = nil
+ }
+ case _, ok := <-s.NotaryChan:
+ if !ok {
+ s.NotaryChan = nil
+ }
+ default:
+ break drainloop
+ }
+ }
+}
+
+// restoreSubscriptions restores subscriptions according to
+// cached information about them.
+func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent,
+ blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent,
+) bool {
+ var err error
+
+ // new block events restoration
+ if s.subscribedToNewBlocks {
+ _, err = s.client.ReceiveBlocks(blCh)
+ if err != nil {
+ s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
+ return false
+ }
+ }
+
+ // notification events restoration
+ for contract := range s.subscribedEvents {
+ _, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
+ if err != nil {
+ s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ return false
+ }
+ }
+
+ // notary notification events restoration
+ for signer := range s.subscribedNotaryEvents {
+ _, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
+ if err != nil {
+ s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ return false
+ }
+ }
+ return true
+}
+
// awaitHeight checks that the remote client has reached at least the expected block
// height and returns an error if that height is not reached within the timeout duration.
// This function is required to avoid connections to unsynced RPC nodes, because
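
The routing loop above relies on channel-close semantics: a receive with ok == false marks the connection as lost, switchEndpoint swaps in a fresh channel set, and the stale set is drained or nil-ed out. A minimal, self-contained sketch of that pattern, with hypothetical stand-in types rather than the real subscriber ones:

package main

import (
	"context"
	"fmt"
)

// sub is a hypothetical stand-in for one subscription's channel set.
type sub struct{ events chan string }

// route forwards events to out; when the source channel closes it asks
// resubscribe for a fresh channel set, mirroring routeNotifications above.
func route(ctx context.Context, cur *sub, resubscribe func() *sub, out chan<- string) {
	for {
		select {
		case <-ctx.Done():
			close(out)
			return
		case ev, ok := <-cur.events:
			if !ok { // connection lost: channel was closed by the producer
				cur = resubscribe()
				continue
			}
			out <- ev
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	first, second := &sub{events: make(chan string, 1)}, &sub{events: make(chan string, 1)}
	first.events <- "block#1"
	close(first.events) // simulate a dropped connection after one event
	second.events <- "block#2"

	out := make(chan string, 2)
	go route(ctx, first, func() *sub { return second }, out)
	fmt.Println(<-out, <-out) // block#1 block#2
	cancel()
}
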
diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go
index 31c28e2ffc..974be1120a 100644
--- a/pkg/morph/timer/block.go
+++ b/pkg/morph/timer/block.go
@@ -15,41 +15,19 @@ type BlockTickHandler func()
// It counts block ticks and performs certain actions
// on block time intervals.
type BlockTimer struct {
- rolledBack bool
-
- mtx *sync.Mutex
+ mtx sync.Mutex
dur BlockMeter
baseDur uint32
- mul, div uint32
-
cur, tgt uint32
last uint32
h BlockTickHandler
- ps []BlockTimer
-
once bool
-
- deltaCfg
-}
-
-// DeltaOption is an option of delta-interval handler.
-type DeltaOption func(*deltaCfg)
-
-type deltaCfg struct {
- pulse bool
-}
-
-// WithPulse returns option to call delta-interval handler multiple times.
-func WithPulse() DeltaOption {
- return func(c *deltaCfg) {
- c.pulse = true
- }
}
// StaticBlockMeter returns a BlockMeter that always returns (d, nil).
@@ -64,55 +42,20 @@ func StaticBlockMeter(d uint32) BlockMeter {
// Reset should be called before timer ticking.
func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
- mtx: new(sync.Mutex),
dur: dur,
- mul: 1,
- div: 1,
h: h,
- deltaCfg: deltaCfg{
- pulse: true,
- },
}
}
// NewOneTickTimer creates a new BlockTimer that ticks only once.
-//
-// Do not use delta handlers with pulse in this timer.
func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
- mtx: new(sync.Mutex),
dur: dur,
- mul: 1,
- div: 1,
h: h,
once: true,
}
}
-// OnDelta registers handler which is executed on (mul / div * BlockMeter()) block
-// after basic interval reset.
-//
-// If WithPulse option is provided, handler is executed (mul / div * BlockMeter()) block
-// during base interval.
-func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) {
- c := deltaCfg{
- pulse: false,
- }
-
- for i := range opts {
- opts[i](&c)
- }
-
- t.ps = append(t.ps, BlockTimer{
- mul: mul,
- div: div,
- h: h,
- once: t.once,
-
- deltaCfg: c,
- })
-}
-
// Reset resets previous ticks of the BlockTimer.
//
// Returns BlockMeter's error upon occurrence.
@@ -126,29 +69,18 @@ func (t *BlockTimer) Reset() error {
t.resetWithBaseInterval(d)
- for i := range t.ps {
- t.ps[i].resetWithBaseInterval(d)
- }
-
t.mtx.Unlock()
return nil
}
func (t *BlockTimer) resetWithBaseInterval(d uint32) {
- t.rolledBack = false
t.baseDur = d
t.reset()
}
func (t *BlockTimer) reset() {
- mul, div := t.mul, t.div
-
- if !t.pulse && t.rolledBack && mul < div {
- mul, div = 1, 1
- }
-
- delta := mul * t.baseDur / div
+ delta := t.baseDur
if delta == 0 {
delta = 1
}
@@ -182,12 +114,7 @@ func (t *BlockTimer) tick(h uint32) {
if !t.once {
t.cur = 0
- t.rolledBack = true
t.reset()
}
}
-
- for i := range t.ps {
- t.ps[i].tick(h)
- }
}
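
With the delta-interval machinery removed, a BlockTimer is just one handler fired every BlockMeter() ticks. A usage sketch built only from the API exercised by the tests below (NewBlockTimer, StaticBlockMeter, Reset, Tick):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
)

func main() {
	bt := timer.NewBlockTimer(timer.StaticBlockMeter(3), func() {
		fmt.Println("base interval elapsed")
	})
	if err := bt.Reset(); err != nil { // Reset must be called before ticking
		panic(err)
	}
	for h := uint32(1); h <= 9; h++ {
		bt.Tick(h) // handler fires at heights 3, 6 and 9
	}
}
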
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index 93bb04de5e..a144b3db66 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -1,6 +1,7 @@
package timer_test
import (
+ "errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
@@ -8,7 +9,7 @@ import (
)
func tickN(t *timer.BlockTimer, n uint32) {
- for i := uint32(0); i < n; i++ {
+ for range n {
t.Tick(0)
}
}
@@ -17,7 +18,7 @@ func tickN(t *timer.BlockTimer, n uint32) {
// "resetting" consists of ticking the current height as well and invoking `Reset`.
func TestIRBlockTimer_Reset(t *testing.T) {
var baseCounter [2]int
- blockDur := uint32(3)
+ const blockDur = uint32(3)
bt1 := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
@@ -48,8 +49,40 @@ func TestIRBlockTimer_Reset(t *testing.T) {
require.Equal(t, baseCounter[0], baseCounter[1])
}
+func TestBlockTimer_ResetChangeDuration(t *testing.T) {
+ var dur uint32 = 2
+ var err error
+ var counter int
+
+ bt := timer.NewBlockTimer(
+ func() (uint32, error) { return dur, err },
+ func() { counter++ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 2)
+ require.Equal(t, 1, counter)
+
+ t.Run("return error", func(t *testing.T) {
+ dur = 5
+ err = errors.New("my awesome error")
+ require.ErrorIs(t, bt.Reset(), err)
+
+ tickN(bt, 2)
+ require.Equal(t, 2, counter)
+ })
+ t.Run("change duration", func(t *testing.T) {
+ dur = 5
+ err = nil
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 5)
+ require.Equal(t, 3, counter)
+ })
+}
+
func TestBlockTimer(t *testing.T) {
- blockDur := uint32(10)
+ const blockDur = uint32(10)
baseCallCounter := uint32(0)
bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
@@ -63,85 +96,6 @@ func TestBlockTimer(t *testing.T) {
tickN(bt, intervalNum*blockDur)
require.Equal(t, intervalNum, uint32(baseCallCounter))
-
- // add half-interval handler
- halfCallCounter := uint32(0)
-
- bt.OnDelta(1, 2, func() {
- halfCallCounter++
- })
-
- // add double interval handler
- doubleCallCounter := uint32(0)
-
- bt.OnDelta(2, 1, func() {
- doubleCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- baseCallCounter = 0
- intervalNum = 20
-
- tickN(bt, intervalNum*blockDur)
-
- require.Equal(t, intervalNum, uint32(halfCallCounter))
- require.Equal(t, intervalNum, uint32(baseCallCounter))
- require.Equal(t, intervalNum/2, uint32(doubleCallCounter))
-}
-
-func TestDeltaPulse(t *testing.T) {
- blockDur := uint32(9)
- baseCallCounter := uint32(0)
-
- bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- deltaCallCounter := uint32(0)
-
- div := uint32(3)
-
- bt.OnDelta(1, div, func() {
- deltaCallCounter++
- }, timer.WithPulse())
-
- require.NoError(t, bt.Reset())
-
- intervalNum := uint32(7)
-
- tickN(bt, intervalNum*blockDur)
-
- require.Equal(t, intervalNum, uint32(baseCallCounter))
- require.Equal(t, intervalNum*div, uint32(deltaCallCounter))
-}
-
-func TestDeltaReset(t *testing.T) {
- blockDur := uint32(6)
- baseCallCounter := 0
-
- bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- detlaCallCounter := 0
-
- bt.OnDelta(1, 3, func() {
- detlaCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 6)
-
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 3)
-
- require.Equal(t, 2, detlaCallCounter)
}
func TestNewOneTickTimer(t *testing.T) {
@@ -168,82 +122,51 @@ func TestNewOneTickTimer(t *testing.T) {
tickN(bt, 10)
require.Equal(t, 1, baseCallCounter)
})
-
- t.Run("delta without pulse", func(t *testing.T) {
- blockDur = uint32(10)
- baseCallCounter = 0
-
- bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() {
- baseCallCounter++
- })
-
- detlaCallCounter := 0
-
- bt.OnDelta(1, 10, func() {
- detlaCallCounter++
- })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 10)
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
-
- tickN(bt, 10) // 10 more ticks must not affect counters
- require.Equal(t, 1, baseCallCounter)
- require.Equal(t, 1, detlaCallCounter)
- })
}
func TestBlockTimer_TickSameHeight(t *testing.T) {
- var baseCounter, deltaCounter int
+ var baseCounter int
blockDur := uint32(2)
bt := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
func() { baseCounter++ })
- bt.OnDelta(2, 1, func() {
- deltaCounter++
- })
require.NoError(t, bt.Reset())
- check := func(t *testing.T, h uint32, base, delta int) {
- for i := 0; i < 2*int(blockDur); i++ {
+ check := func(t *testing.T, h uint32, base int) {
+ for range 2 * int(blockDur) {
bt.Tick(h)
require.Equal(t, base, baseCounter)
- require.Equal(t, delta, deltaCounter)
}
}
- check(t, 1, 0, 0)
- check(t, 2, 1, 0)
- check(t, 3, 1, 0)
- check(t, 4, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 1)
+ check(t, 3, 1)
+ check(t, 4, 2)
t.Run("works the same way after `Reset()`", func(t *testing.T) {
t.Run("same block duration", func(t *testing.T) {
require.NoError(t, bt.Reset())
baseCounter = 0
- deltaCounter = 0
- check(t, 1, 0, 0)
- check(t, 2, 1, 0)
- check(t, 3, 1, 0)
- check(t, 4, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 1)
+ check(t, 3, 1)
+ check(t, 4, 2)
})
t.Run("different block duration", func(t *testing.T) {
blockDur = 3
require.NoError(t, bt.Reset())
baseCounter = 0
- deltaCounter = 0
- check(t, 1, 0, 0)
- check(t, 2, 0, 0)
- check(t, 3, 1, 0)
- check(t, 4, 1, 0)
- check(t, 5, 1, 0)
- check(t, 6, 2, 1)
+ check(t, 1, 0)
+ check(t, 2, 0)
+ check(t, 3, 1)
+ check(t, 4, 1)
+ check(t, 5, 1)
+ check(t, 6, 2)
})
})
}
diff --git a/pkg/network/address.go b/pkg/network/address.go
index 0208829801..4643eef15b 100644
--- a/pkg/network/address.go
+++ b/pkg/network/address.go
@@ -1,12 +1,13 @@
package network
import (
- "fmt"
+ "errors"
"net"
"net/url"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@@ -18,6 +19,8 @@ import (
URIAddr strings: "127.0.0.1:8080"
*/
+var errHostIsEmpty = errors.New("host is empty")
+
// Address represents the FrostFS node
// network address.
type Address struct {
@@ -41,13 +44,11 @@ func (a Address) equal(addr Address) bool {
// See also FromString.
func (a Address) URIAddr() string {
_, host, err := manet.DialArgs(a.ma)
- if err != nil {
- // the only correct way to construct Address is AddressFromString
- // which makes this error appear unexpected
- panic(fmt.Errorf("could not get host addr: %w", err))
- }
+ // The only correct way to construct an Address is AddressFromString,
+ // which makes this error unexpected.
+ assert.NoError(err, "could not get host addr")
- if !a.isTLSEnabled() {
+ if !a.IsTLSEnabled() {
return host
}
@@ -88,6 +89,10 @@ func (a *Address) FromString(s string) error {
// multiaddrStringFromHostAddr converts "localhost:8080" to "/dns4/localhost/tcp/8080".
func multiaddrStringFromHostAddr(host string) (string, error) {
+ if len(host) == 0 {
+ return "", errHostIsEmpty
+ }
+
endpoint, port, err := net.SplitHostPort(host)
if err != nil {
return "", err
diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go
index 549e98b65c..63ae0bfdbf 100644
--- a/pkg/network/cache/client.go
+++ b/pkg/network/cache/client.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
@@ -25,6 +26,7 @@ type (
Key *ecdsa.PrivateKey
ResponseCallback func(client.ResponseMetaInfo) error
AllowExternal bool
+ DialerSource *net.DialerSource
}
)
@@ -38,7 +40,7 @@ func NewSDKClientCache(opts ClientCacheOpts) *ClientCache {
}
// Get function returns existing client or creates a new one.
-func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.Client, error) {
+func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.MultiAddressClient, error) {
netAddr := info.AddressGroup()
if c.opts.AllowExternal {
netAddr = append(netAddr, info.ExternalAddressGroup()...)
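
Get now returns clientcore.MultiAddressClient, and the cache threads a DialerSource down to the gRPC dialer (used in multi.go below). A construction sketch using only names that appear in this diff; how the *net.DialerSource itself is built is left out as an assumption:

package main

import (
	"crypto/ecdsa"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
)

// newClientCache wires the options touched by this change.
func newClientCache(key *ecdsa.PrivateKey, ds *net.DialerSource) *cache.ClientCache {
	return cache.NewSDKClientCache(cache.ClientCacheOpts{
		DialTimeout:  5 * time.Second,
		Key:          key,
		DialerSource: ds,
	})
}
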
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 39c191b78c..54c1e18fb7 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -7,11 +7,16 @@ import (
"sync"
"time"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
+ tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -47,33 +52,51 @@ func newMultiClient(addr network.AddressGroup, opts ClientCacheOpts) *multiClien
}
}
-func (x *multiClient) createForAddress(addr network.Address) (clientcore.Client, error) {
- var (
- c client.Client
- prmInit client.PrmInit
- prmDial client.PrmDial
- )
-
- prmDial.SetServerURI(addr.URIAddr())
+func (x *multiClient) createForAddress(ctx context.Context, addr network.Address) (clientcore.Client, error) {
+ var c client.Client
+ prmInit := client.PrmInit{
+ DisableFrostFSErrorResolution: true,
+ }
if x.opts.Key != nil {
- prmInit.SetDefaultPrivateKey(*x.opts.Key)
+ prmInit.Key = *x.opts.Key
}
+ grpcOpts := []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInterceptor(),
+ tagging.NewUnaryClientInterceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ tagging.NewStreamClientInterceptor(),
+ ),
+ grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
+ }
+
+ prmDial := client.PrmDial{
+ Endpoint: addr.URIAddr(),
+ GRPCDialOptions: grpcOpts,
+ }
if x.opts.DialTimeout > 0 {
- prmDial.SetTimeout(x.opts.DialTimeout)
+ prmDial.DialTimeout = x.opts.DialTimeout
}
if x.opts.StreamTimeout > 0 {
- prmDial.SetStreamTimeout(x.opts.StreamTimeout)
+ prmDial.StreamTimeout = x.opts.StreamTimeout
}
if x.opts.ResponseCallback != nil {
- prmInit.SetResponseInfoCallback(x.opts.ResponseCallback)
+ prmInit.ResponseInfoCallback = x.opts.ResponseCallback
}
c.Init(prmInit)
- err := c.Dial(prmDial)
+ err := c.Dial(ctx, prmDial)
if err != nil {
return nil, fmt.Errorf("can't init SDK client: %w", err)
}
@@ -116,6 +139,7 @@ loop:
continue loop
}
}
+ x.clients[a].invalidate()
delete(x.clients, a)
}
@@ -137,14 +161,14 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
group.IterateAddresses(func(addr network.Address) bool {
select {
case <-ctx.Done():
- firstErr = context.Canceled
+ firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled)
return true
default:
}
var err error
- c, err := x.client(addr)
+ c, err := x.client(ctx, addr)
if err == nil {
err = f(c)
}
@@ -152,17 +176,19 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
// non-status logic error that could be returned
// from the SDK client; should not be considered
// as a connection error
- var siErr *object.SplitInfoError
-
- success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr)
- if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) {
- firstErr = err
- }
+ var siErr *objectSDK.SplitInfoError
+ var eiErr *objectSDK.ECInfoError
if err != nil {
+ err = fmt.Errorf("client connection error at %v: %w", addr, err)
x.ReportError(err)
}
+ success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr)
+ if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) {
+ firstErr = err
+ }
+
return success
})
@@ -181,8 +207,9 @@ func (x *multiClient) ReportError(err error) {
// non-status logic error that could be returned
// from the SDK client; should not be considered
// as a connection error
- var siErr *object.SplitInfoError
- if errors.As(err, &siErr) {
+ var siErr *objectSDK.SplitInfoError
+ var eiErr *objectSDK.ECInfoError
+ if errors.As(err, &siErr) || errors.As(err, &eiErr) {
return
}
@@ -205,7 +232,7 @@ func (s *singleClient) invalidate() {
s.Unlock()
}
-func (x *multiClient) ObjectPutInit(ctx context.Context, p client.PrmObjectPutInit) (res *client.ObjectWriter, err error) {
+func (x *multiClient) ObjectPutInit(ctx context.Context, p client.PrmObjectPutInit) (res client.ObjectWriter, err error) {
err = x.iterateClients(ctx, func(c clientcore.Client) error {
res, err = c.ObjectPutInit(ctx, p)
return err
@@ -214,9 +241,9 @@ func (x *multiClient) ObjectPutInit(ctx context.Context, p client.PrmObjectPutIn
return
}
-func (x *multiClient) ContainerAnnounceUsedSpace(ctx context.Context, prm client.PrmAnnounceSpace) (res *client.ResAnnounceSpace, err error) {
+func (x *multiClient) ObjectPutSingle(ctx context.Context, p client.PrmObjectPutSingle) (res *client.ResObjectPutSingle, err error) {
err = x.iterateClients(ctx, func(c clientcore.Client) error {
- res, err = c.ContainerAnnounceUsedSpace(ctx, prm)
+ res, err = c.ObjectPutSingle(ctx, p)
return err
})
@@ -277,25 +304,7 @@ func (x *multiClient) ObjectSearchInit(ctx context.Context, p client.PrmObjectSe
return
}
-func (x *multiClient) AnnounceLocalTrust(ctx context.Context, prm client.PrmAnnounceLocalTrust) (res *client.ResAnnounceLocalTrust, err error) {
- err = x.iterateClients(ctx, func(c clientcore.Client) error {
- res, err = c.AnnounceLocalTrust(ctx, prm)
- return err
- })
-
- return
-}
-
-func (x *multiClient) AnnounceIntermediateTrust(ctx context.Context, prm client.PrmAnnounceIntermediateTrust) (res *client.ResAnnounceIntermediateTrust, err error) {
- err = x.iterateClients(ctx, func(c clientcore.Client) error {
- res, err = c.AnnounceIntermediateTrust(ctx, prm)
- return err
- })
-
- return
-}
-
-func (x *multiClient) ExecRaw(f func(client *rawclient.Client) error) error {
+func (x *multiClient) ExecRaw(func(client *rawclient.Client) error) error {
panic("multiClient.ExecRaw() must not be called")
}
@@ -315,8 +324,8 @@ func (x *multiClient) Close() error {
return nil
}
-func (x *multiClient) RawForAddress(addr network.Address, f func(client *rawclient.Client) error) error {
- c, err := x.client(addr)
+func (x *multiClient) RawForAddress(ctx context.Context, addr network.Address, f func(client *rawclient.Client) error) error {
+ c, err := x.client(ctx, addr)
if err != nil {
return err
}
@@ -328,7 +337,7 @@ func (x *multiClient) RawForAddress(addr network.Address, f func(client *rawclie
return err
}
-func (x *multiClient) client(addr network.Address) (clientcore.Client, error) {
+func (x *multiClient) client(ctx context.Context, addr network.Address) (clientcore.Client, error) {
strAddr := addr.String()
x.mtx.RLock()
@@ -369,7 +378,7 @@ func (x *multiClient) client(addr network.Address) (clientcore.Client, error) {
return nil, errRecentlyFailed
}
- cl, err := x.createForAddress(addr)
+ cl, err := x.createForAddress(ctx, addr)
if err != nil {
c.lastAttempt = time.Now()
return nil, err
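
grpc.WithChainUnaryInterceptor puts the first interceptor outermost, so the QoS IO-tag adjustment above runs before metrics, tracing and tagging on every call. A dependency-light sketch demonstrating that ordering with only the grpc-go API, no FrostFS types:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// named returns a unary client interceptor that announces itself before
// delegating, so the chain order becomes visible.
func named(name string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any,
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
	) error {
		fmt.Println("enter", name)
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}

func main() {
	chain := []grpc.UnaryClientInterceptor{named("qos"), named("metrics"), named("tracing")}

	// Compose the chain by hand the way WithChainUnaryInterceptor does:
	// the first interceptor ends up outermost.
	invoke := grpc.UnaryInvoker(func(ctx context.Context, method string, req, reply any,
		cc *grpc.ClientConn, opts ...grpc.CallOption,
	) error {
		fmt.Println("actual RPC")
		return nil
	})
	for i := len(chain) - 1; i >= 0; i-- {
		ic, next := chain[i], invoke
		invoke = func(ctx context.Context, method string, req, reply any,
			cc *grpc.ClientConn, opts ...grpc.CallOption,
		) error {
			return ic(ctx, method, req, reply, cc, next, opts...)
		}
	}
	_ = invoke(context.Background(), "/svc/Method", nil, nil, nil)
	// Prints: enter qos, enter metrics, enter tracing, actual RPC.
}
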
diff --git a/pkg/network/group.go b/pkg/network/group.go
index c18feac276..0044fb2d4e 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -3,6 +3,8 @@ package network
import (
"errors"
"fmt"
+ "iter"
+ "slices"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -57,7 +59,7 @@ func (x AddressGroup) Len() int {
// Less returns true if i-th address in AddressGroup supports TLS
// and j-th one doesn't.
func (x AddressGroup) Less(i, j int) bool {
- return x[i].isTLSEnabled() && !x[j].isTLSEnabled()
+ return x[i].IsTLSEnabled() && !x[j].IsTLSEnabled()
}
// Swap swaps i-th and j-th addresses in AddressGroup.
@@ -67,11 +69,10 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // Must iterate over network addresses and pass each one
- // to the handler until it returns true.
- IterateAddresses(func(string) bool)
+ // Addresses must return an iterator over network addresses.
+ Addresses() iter.Seq[string]
- // Must return number of addresses in group.
+ // NumberOfAddresses must return the number of addresses in the group.
NumberOfAddresses() int
}
@@ -130,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error {
// iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f
// until the first parsing failure or f's error.
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) {
- iter.IterateAddresses(func(s string) bool {
+ for s := range iter.Addresses() {
var a Address
err = a.FromString(s)
if err != nil {
- err = fmt.Errorf("could not parse address from string: %w", err)
- return true
+ return fmt.Errorf("could not parse address from string: %w", err)
}
err = f(a)
-
- return err != nil
- })
+ if err != nil {
+ return err
+ }
+ }
return
}
@@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
// at least one common address.
func (x AddressGroup) Intersects(x2 AddressGroup) bool {
for i := range x {
- for j := range x2 {
- if x[i].equal(x2[j]) {
- return true
- }
+ if slices.ContainsFunc(x2, x[i].equal) {
+ return true
}
}
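
Switching MultiAddressIterator to iter.Seq[string] (Go 1.23 range-over-func) lets callers break early with a plain break instead of the old callback-returns-bool protocol. A small consumer sketch (the addrs type here is hypothetical; the test below defines an equivalent testIterator):

package main

import (
	"fmt"
	"iter"
	"slices"
)

// addrs satisfies the reshaped MultiAddressIterator contract.
type addrs []string

func (a addrs) Addresses() iter.Seq[string] { return slices.Values(a) }
func (a addrs) NumberOfAddresses() int      { return len(a) }

func main() {
	group := addrs{"/dns4/a/tcp/1", "/dns4/b/tcp/2/tls", "/dns4/c/tcp/3"}
	for s := range group.Addresses() {
		fmt.Println("checking", s)
		if s == "/dns4/b/tcp/2/tls" {
			break // a plain break replaces "return true" in the old interface
		}
	}
}
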
diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go
index 5b335fa526..d082645336 100644
--- a/pkg/network/group_test.go
+++ b/pkg/network/group_test.go
@@ -1,6 +1,8 @@
package network
import (
+ "iter"
+ "slices"
"sort"
"testing"
@@ -58,10 +60,8 @@ func TestAddressGroup_FromIterator(t *testing.T) {
type testIterator []string
-func (t testIterator) IterateAddresses(f func(string) bool) {
- for i := range t {
- f(t[i])
- }
+func (t testIterator) Addresses() iter.Seq[string] {
+ return slices.Values(t)
}
func (t testIterator) NumberOfAddresses() int {
diff --git a/pkg/network/tls.go b/pkg/network/tls.go
index de2c93694c..544dc82407 100644
--- a/pkg/network/tls.go
+++ b/pkg/network/tls.go
@@ -11,13 +11,8 @@ const (
// tls var is used for (un)wrapping other multiaddrs around TLS multiaddr.
var tls, _ = multiaddr.NewMultiaddr("/" + tlsProtocolName)
-// isTLSEnabled searches for wrapped TLS protocol in multiaddr.
-func (a Address) isTLSEnabled() bool {
- for _, protoc := range a.ma.Protocols() {
- if protoc.Code == multiaddr.P_TLS {
- return true
- }
- }
-
- return false
+// IsTLSEnabled reports whether the multiaddr contains the TLS protocol.
+func (a Address) IsTLSEnabled() bool {
+ _, err := a.ma.ValueForProtocol(multiaddr.P_TLS)
+ return err == nil
}
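
ValueForProtocol returns an error when the protocol component is absent, so a nil error alone answers the membership question without iterating Protocols(). For example:

package main

import (
	"fmt"

	"github.com/multiformats/go-multiaddr"
)

func main() {
	ma, err := multiaddr.NewMultiaddr("/dns4/localhost/tcp/8080/tls")
	if err != nil {
		panic(err)
	}
	// A nil error means the /tls component is present in the multiaddr.
	_, err = ma.ValueForProtocol(multiaddr.P_TLS)
	fmt.Println("tls enabled:", err == nil) // true
}
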
diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go
index 25775eaf16..14729f4c2b 100644
--- a/pkg/network/tls_test.go
+++ b/pkg/network/tls_test.go
@@ -24,6 +24,21 @@ func TestAddress_TLSEnabled(t *testing.T) {
err := addr.FromString(test.input)
require.NoError(t, err)
- require.Equal(t, test.wantTLS, addr.isTLSEnabled(), test.input)
+ require.Equal(t, test.wantTLS, addr.IsTLSEnabled(), test.input)
}
}
+
+func BenchmarkAddressTLSEnabled(b *testing.B) {
+ var addr Address
+ err := addr.FromString("/dns4/localhost/tcp/8080/tls")
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ var enabled bool
+ for range b.N {
+ enabled = addr.IsTLSEnabled()
+ }
+ require.True(b, enabled)
+}
diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go
index 2144a3001e..78129bfbe0 100644
--- a/pkg/network/transport/accounting/grpc/service.go
+++ b/pkg/network/transport/accounting/grpc/service.go
@@ -3,9 +3,9 @@ package accounting
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
- accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
+ accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc"
)
// Server wraps FrostFS API Accounting service and
diff --git a/pkg/network/transport/apemanager/grpc/service.go b/pkg/network/transport/apemanager/grpc/service.go
new file mode 100644
index 0000000000..850d38a651
--- /dev/null
+++ b/pkg/network/transport/apemanager/grpc/service.go
@@ -0,0 +1,63 @@
+package apemanager
+
+import (
+ "context"
+
+ apemanager_svc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+ apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
+)
+
+type Server struct {
+ srv apemanager_svc.Server
+}
+
+var _ apemanager_grpc.APEManagerServiceServer = (*Server)(nil)
+
+func New(c apemanager_svc.Server) *Server {
+ return &Server{
+ srv: c,
+ }
+}
+
+func (s *Server) AddChain(ctx context.Context, req *apemanager_grpc.AddChainRequest) (*apemanager_grpc.AddChainResponse, error) {
+ v2req := new(apemanager_v2.AddChainRequest)
+ if err := v2req.FromGRPCMessage(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.srv.AddChain(ctx, v2req)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ToGRPCMessage().(*apemanager_grpc.AddChainResponse), nil
+}
+
+func (s *Server) RemoveChain(ctx context.Context, req *apemanager_grpc.RemoveChainRequest) (*apemanager_grpc.RemoveChainResponse, error) {
+ v2req := new(apemanager_v2.RemoveChainRequest)
+ if err := v2req.FromGRPCMessage(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.srv.RemoveChain(ctx, v2req)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ToGRPCMessage().(*apemanager_grpc.RemoveChainResponse), nil
+}
+
+func (s *Server) ListChains(ctx context.Context, req *apemanager_grpc.ListChainsRequest) (*apemanager_grpc.ListChainsResponse, error) {
+ v2req := new(apemanager_v2.ListChainsRequest)
+ if err := v2req.FromGRPCMessage(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.srv.ListChains(ctx, v2req)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ToGRPCMessage().(*apemanager_grpc.ListChainsResponse), nil
+}
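
How this transport wrapper gets plugged in is not part of the file itself; a plausible wiring sketch, assuming the SDK's generated package follows the usual protoc-gen-go-grpc Register* naming convention:

package main

import (
	"net"

	apemanagergrpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
	"google.golang.org/grpc"
)

// serveAPEManager registers the wrapper on a gRPC server. The Register
// function name is an assumption based on protoc-gen-go-grpc conventions.
func serveAPEManager(lis net.Listener, impl apemanagergrpc.APEManagerServiceServer) error {
	srv := grpc.NewServer()
	apemanagergrpc.RegisterAPEManagerServiceServer(srv, impl)
	return srv.Serve(lis)
}
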
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index ed514d6d4a..8cbf8d9c38 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -3,9 +3,9 @@ package container
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
containersvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
)
// Server wraps FrostFS API Container service and
@@ -81,47 +81,25 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
-// SetExtendedACL converts gRPC SetExtendedACLRequest message and passes it to internal Container service.
-func (s *Server) SetExtendedACL(ctx context.Context, req *containerGRPC.SetExtendedACLRequest) (*containerGRPC.SetExtendedACLResponse, error) {
- setEACLReq := new(container.SetExtendedACLRequest)
- if err := setEACLReq.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.SetExtendedACL(ctx, setEACLReq)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*containerGRPC.SetExtendedACLResponse), nil
+type containerStreamerV2 struct {
+ containerGRPC.ContainerService_ListStreamServer
}
-// GetExtendedACL converts gRPC GetExtendedACLRequest message and passes it to internal Container service.
-func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExtendedACLRequest) (*containerGRPC.GetExtendedACLResponse, error) {
- getEACLReq := new(container.GetExtendedACLRequest)
- if err := getEACLReq.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.GetExtendedACL(ctx, getEACLReq)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*containerGRPC.GetExtendedACLResponse), nil
+func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error {
+ return s.ContainerService_ListStreamServer.Send(
+ resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse),
+ )
}
-// AnnounceUsedSpace converts gRPC AnnounceUsedSpaceRequest message and passes it to internal Container service.
-func (s *Server) AnnounceUsedSpace(ctx context.Context, req *containerGRPC.AnnounceUsedSpaceRequest) (*containerGRPC.AnnounceUsedSpaceResponse, error) {
- announceReq := new(container.AnnounceUsedSpaceRequest)
- if err := announceReq.FromGRPCMessage(req); err != nil {
- return nil, err
+// ListStream converts the gRPC ListStreamRequest message and relays the data
+// from the internal server-side stream to the gRPC stream.
+func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error {
+ listReq := new(container.ListStreamRequest)
+ if err := listReq.FromGRPCMessage(req); err != nil {
+ return err
}
- resp, err := s.srv.AnnounceUsedSpace(ctx, announceReq)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*containerGRPC.AnnounceUsedSpaceResponse), nil
+ return s.srv.ListStream(listReq, &containerStreamerV2{
+ ContainerService_ListStreamServer: gStream,
+ })
}
diff --git a/pkg/network/transport/netmap/grpc/service.go b/pkg/network/transport/netmap/grpc/service.go
index 7a3aec86c2..4bc3a42f81 100644
--- a/pkg/network/transport/netmap/grpc/service.go
+++ b/pkg/network/transport/netmap/grpc/service.go
@@ -3,9 +3,9 @@ package grpc
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
- netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
netmapsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
+ netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
)
// Server wraps FrostFS API Netmap service and
@@ -24,7 +24,8 @@ func New(c netmapsvc.Server) *Server {
// LocalNodeInfo converts gRPC request message and passes it to internal netmap service.
func (s *Server) LocalNodeInfo(
ctx context.Context,
- req *netmapGRPC.LocalNodeInfoRequest) (*netmapGRPC.LocalNodeInfoResponse, error) {
+ req *netmapGRPC.LocalNodeInfoRequest,
+) (*netmapGRPC.LocalNodeInfoResponse, error) {
nodeInfoReq := new(netmap.LocalNodeInfoRequest)
if err := nodeInfoReq.FromGRPCMessage(req); err != nil {
return nil, err
diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go
index e1655c183e..655b1f9fb1 100644
--- a/pkg/network/transport/object/grpc/get.go
+++ b/pkg/network/transport/object/grpc/get.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
type getStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/range.go b/pkg/network/transport/object/grpc/range.go
index 391536e8ef..7d7ce0e4cf 100644
--- a/pkg/network/transport/object/grpc/range.go
+++ b/pkg/network/transport/object/grpc/range.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
type getRangeStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go
index a151ced096..8432707f7b 100644
--- a/pkg/network/transport/object/grpc/search.go
+++ b/pkg/network/transport/object/grpc/search.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
type searchStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index 82e323a3cd..15dacd5539 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -5,10 +5,10 @@ import (
"errors"
"io"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
// Server wraps FrostFS API Object service and
@@ -24,6 +24,48 @@ func New(c objectSvc.ServiceServer) *Server {
}
}
+// Patch opens an internal Object patch stream and feeds it with the data read from the gRPC stream.
+func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
+ stream, err := s.srv.Patch(gStream.Context())
+ if err != nil {
+ return err
+ }
+
+ for {
+ req, err := gStream.Recv()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ resp, err := stream.CloseAndRecv(gStream.Context())
+ if err != nil {
+ return err
+ }
+
+ return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse))
+ }
+
+ return err
+ }
+
+ patchReq := new(object.PatchRequest)
+ if err := patchReq.FromGRPCMessage(req); err != nil {
+ return err
+ }
+
+ if err := stream.Send(gStream.Context(), patchReq); err != nil {
+ if errors.Is(err, util.ErrAbortStream) {
+ resp, err := stream.CloseAndRecv(gStream.Context())
+ if err != nil {
+ return err
+ }
+
+ return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse))
+ }
+
+ return err
+ }
+ }
+}
+
// Put opens an internal Object service Put stream and relays data from the gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
stream, err := s.srv.Put(gStream.Context())
@@ -35,7 +77,7 @@ func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
req, err := gStream.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
- resp, err := stream.CloseAndRecv()
+ resp, err := stream.CloseAndRecv(gStream.Context())
if err != nil {
return err
}
@@ -51,9 +93,9 @@ func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
return err
}
- if err := stream.Send(putReq); err != nil {
+ if err := stream.Send(gStream.Context(), putReq); err != nil {
if errors.Is(err, util.ErrAbortStream) {
- resp, err := stream.CloseAndRecv()
+ resp, err := stream.CloseAndRecv(gStream.Context())
if err != nil {
return err
}
@@ -110,3 +152,17 @@ func (s *Server) GetRangeHash(ctx context.Context, req *objectGRPC.GetRangeHashR
return resp.ToGRPCMessage().(*objectGRPC.GetRangeHashResponse), nil
}
+
+func (s *Server) PutSingle(ctx context.Context, req *objectGRPC.PutSingleRequest) (*objectGRPC.PutSingleResponse, error) {
+ putSingleReq := &object.PutSingleRequest{}
+ if err := putSingleReq.FromGRPCMessage(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.srv.PutSingle(ctx, putSingleReq)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ToGRPCMessage().(*objectGRPC.PutSingleResponse), nil
+}
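
Put and Patch share the same shutdown discipline: io.EOF from Recv and util.ErrAbortStream from Send are both terminal, and in both cases a final response is still fetched via CloseAndRecv so the client gets a status. A schematic, dependency-free version of that loop (errAbortStream is a stand-in for util.ErrAbortStream):

package main

import (
	"errors"
	"fmt"
	"io"
)

var errAbortStream = errors.New("abort message stream") // stand-in for util.ErrAbortStream

// relay drains recv into send until either side signals termination,
// then returns the final response obtained from closeAndRecv.
func relay(recv func() (string, error), send func(string) error, closeAndRecv func() (string, error)) (string, error) {
	for {
		msg, err := recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return closeAndRecv() // client finished sending
			}
			return "", err
		}
		if err := send(msg); err != nil {
			if errors.Is(err, errAbortStream) {
				return closeAndRecv() // server aborted but still has a response
			}
			return "", err
		}
	}
}

func main() {
	msgs := []string{"chunk1", "chunk2"}
	recv := func() (string, error) {
		if len(msgs) == 0 {
			return "", io.EOF
		}
		m := msgs[0]
		msgs = msgs[1:]
		return m, nil
	}
	resp, err := relay(recv, func(string) error { return nil }, func() (string, error) { return "ok", nil })
	fmt.Println(resp, err) // ok <nil>
}
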
diff --git a/pkg/network/transport/reputation/grpc/service.go b/pkg/network/transport/reputation/grpc/service.go
deleted file mode 100644
index bb90743240..0000000000
--- a/pkg/network/transport/reputation/grpc/service.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package grpcreputation
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
- reputation2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation/grpc"
- reputationrpc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/rpc"
-)
-
-// Server wraps FrostFS API v2 Reputation service server
-// and provides gRPC Reputation service server interface.
-type Server struct {
- srv reputationrpc.Server
-}
-
-// New creates, initializes and returns Server instance.
-func New(srv reputationrpc.Server) *Server {
- return &Server{
- srv: srv,
- }
-}
-
-func (s *Server) AnnounceLocalTrust(ctx context.Context, r *reputation2.AnnounceLocalTrustRequest) (*reputation2.AnnounceLocalTrustResponse, error) {
- req := new(reputation.AnnounceLocalTrustRequest)
- if err := req.FromGRPCMessage(r); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.AnnounceLocalTrust(ctx, req)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*reputation2.AnnounceLocalTrustResponse), nil
-}
-
-func (s *Server) AnnounceIntermediateResult(ctx context.Context, r *reputation2.AnnounceIntermediateResultRequest) (*reputation2.AnnounceIntermediateResultResponse, error) {
- req := new(reputation.AnnounceIntermediateResultRequest)
- if err := req.FromGRPCMessage(r); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.AnnounceIntermediateResult(ctx, req)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*reputation2.AnnounceIntermediateResultResponse), nil
-}
diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go
index e0dc749426..6fce397f35 100644
--- a/pkg/network/transport/session/grpc/service.go
+++ b/pkg/network/transport/session/grpc/service.go
@@ -3,9 +3,9 @@ package session
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
sessionsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc"
)
// Server wraps FrostFS API Session service and
diff --git a/pkg/network/validation.go b/pkg/network/validation.go
index 92f6501193..b5157f28f6 100644
--- a/pkg/network/validation.go
+++ b/pkg/network/validation.go
@@ -2,6 +2,7 @@ package network
import (
"errors"
+ "iter"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -34,8 +35,8 @@ var (
// MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo
-func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) {
- (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
+func (x NodeEndpointsIterator) Addresses() iter.Seq[string] {
+ return (netmap.NodeInfo)(x).NetworkEndpoints()
}
func (x NodeEndpointsIterator) NumberOfAddresses() int {
diff --git a/pkg/services/accounting/executor.go b/pkg/services/accounting/executor.go
index 4028459571..93e44c52b8 100644
--- a/pkg/services/accounting/executor.go
+++ b/pkg/services/accounting/executor.go
@@ -4,7 +4,8 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type ServiceExecutor interface {
@@ -12,13 +13,15 @@ type ServiceExecutor interface {
}
type executorSvc struct {
- exec ServiceExecutor
+ exec ServiceExecutor
+ respSvc *response.Service
}
// NewExecutionService wraps ServiceExecutor and returns Accounting Service interface.
-func NewExecutionService(exec ServiceExecutor) Server {
+func NewExecutionService(exec ServiceExecutor, respSvc *response.Service) Server {
return &executorSvc{
- exec: exec,
+ exec: exec,
+ respSvc: respSvc,
}
}
@@ -31,5 +34,6 @@ func (s *executorSvc) Balance(ctx context.Context, req *accounting.BalanceReques
resp := new(accounting.BalanceResponse)
resp.SetBody(respBody)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go
index 434c895082..6c2df84282 100644
--- a/pkg/services/accounting/morph/executor.go
+++ b/pkg/services/accounting/morph/executor.go
@@ -5,9 +5,9 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceReq
return nil, fmt.Errorf("invalid account: %w", err)
}
- amount, err := s.client.BalanceOf(id)
+ amount, err := s.client.BalanceOf(ctx, id)
if err != nil {
return nil, err
}
- balancePrecision, err := s.client.Decimals()
+ balancePrecision, err := s.client.Decimals(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/accounting/response.go b/pkg/services/accounting/response.go
deleted file mode 100644
index a78ac6fd63..0000000000
--- a/pkg/services/accounting/response.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package accounting
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
-)
-
-type responseService struct {
- respSvc *response.Service
-
- svc Server
-}
-
-// NewResponseService returns accounting service instance that passes internal service
-// call to response service.
-func NewResponseService(accSvc Server, respSvc *response.Service) Server {
- return &responseService{
- respSvc: respSvc,
- svc: accSvc,
- }
-}
-
-func (s *responseService) Balance(ctx context.Context, req *accounting.BalanceRequest) (*accounting.BalanceResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Balance(ctx, req.(*accounting.BalanceRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*accounting.BalanceResponse), nil
-}
diff --git a/pkg/services/accounting/server.go b/pkg/services/accounting/server.go
index 72833c46cd..a280416fb9 100644
--- a/pkg/services/accounting/server.go
+++ b/pkg/services/accounting/server.go
@@ -3,7 +3,7 @@ package accounting
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
// Server is an interface of the FrostFS API Accounting service server.
diff --git a/pkg/services/accounting/sign.go b/pkg/services/accounting/sign.go
index e98d9b3af2..d8feb76bd5 100644
--- a/pkg/services/accounting/sign.go
+++ b/pkg/services/accounting/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type signService struct {
@@ -22,17 +22,6 @@ func NewSignService(key *ecdsa.PrivateKey, svc Server) Server {
}
func (s *signService) Balance(ctx context.Context, req *accounting.BalanceRequest) (*accounting.BalanceResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Balance(ctx, req.(*accounting.BalanceRequest))
- },
- func() util.ResponseMessage {
- return new(accounting.BalanceResponse)
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*accounting.BalanceResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.Balance(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
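
With the standalone response wrapper removed, the accounting stack collapses to executor plus signer. A wiring sketch using only constructors that appear in this diff:

package main

import (
	"crypto/ecdsa"

	accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
)

// buildAccounting assembles the reduced stack: the executor stamps response
// meta itself now, so the signing layer wraps it directly.
func buildAccounting(key *ecdsa.PrivateKey, exec accountingsvc.ServiceExecutor, respSvc *response.Service) accountingsvc.Server {
	return accountingsvc.NewSignService(key, accountingsvc.NewExecutionService(exec, respSvc))
}
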
diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go
new file mode 100644
index 0000000000..61fb025b89
--- /dev/null
+++ b/pkg/services/apemanager/audit.go
@@ -0,0 +1,75 @@
+package apemanager
+
+import (
+ "context"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+ ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
+)
+
+var _ Server = (*auditService)(nil)
+
+type auditService struct {
+ next Server
+ log *logger.Logger
+ enabled *atomic.Bool
+}
+
+func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Server {
+ return &auditService{
+ next: next,
+ log: log,
+ enabled: enabled,
+ }
+}
+
+// AddChain implements Server.
+func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainRequest) (*apemanager.AddChainResponse, error) {
+ res, err := a.next.AddChain(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
+ audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
+ req.GetBody().GetTarget().GetName(),
+ res.GetBody().GetChainID()),
+ err == nil)
+
+ return res, err
+}
+
+// ListChains implements Server.
+func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChainsRequest) (*apemanager.ListChainsResponse, error) {
+ res, err := a.next.ListChains(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
+ audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
+ req.GetBody().GetTarget().GetName(),
+ nil),
+ err == nil)
+
+ return res, err
+}
+
+// RemoveChain implements Server.
+func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveChainRequest) (*apemanager.RemoveChainResponse, error) {
+ res, err := a.next.RemoveChain(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+
+ audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
+ audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
+ req.GetBody().GetTarget().GetName(),
+ req.GetBody().GetChainID()),
+ err == nil)
+
+ return res, err
+}
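
Note that the audit layer calls the next service first and only then consults the enabled flag, because AddChain's audit record needs the ChainID from the response. The shared *atomic.Bool makes the toggle safe to flip at runtime, e.g. from a config-reload handler:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	enabled := new(atomic.Bool) // the same value handed to NewAuditService
	enabled.Store(true)         // e.g. on SIGHUP when audit logging is turned on

	// auditService checks the flag on every request; no restart required.
	fmt.Println("audit on:", enabled.Load())
	enabled.Store(false)
	fmt.Println("audit on:", enabled.Load())
}
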
diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go
new file mode 100644
index 0000000000..1d485321c2
--- /dev/null
+++ b/pkg/services/apemanager/errors/errors.go
@@ -0,0 +1,17 @@
+package errors
+
+import (
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+func ErrAPEManagerAccessDenied(reason string) error {
+ err := new(apistatus.APEManagerAccessDenied)
+ err.WriteReason(reason)
+ return err
+}
+
+func ErrAPEManagerInvalidArgument(msg string) error {
+ err := new(apistatus.InvalidArgument)
+ err.SetMessage(msg)
+ return err
+}
diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go
new file mode 100644
index 0000000000..fc08fe569a
--- /dev/null
+++ b/pkg/services/apemanager/executor.go
@@ -0,0 +1,261 @@
+package apemanager
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "errors"
+ "fmt"
+
+ ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ apeV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/ape"
+ apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "github.com/mr-tron/base58/base58"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "go.uber.org/zap"
+)
+
+var errEmptyBodySignature = errors.New("malformed request: empty body signature")
+
+type cfg struct {
+ log *logger.Logger
+}
+
+type Service struct {
+ cfg
+
+ waiter Waiter
+
+ cnrSrc containercore.Source
+
+ contractStorage ape_contract.ProxyAdaptedContractStorage
+}
+
+type Option func(*cfg)
+
+type Waiter interface {
+ WaitTxHalt(context.Context, uint32, util.Uint256) error
+}
+
+func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service {
+ s := &Service{
+ cnrSrc: cnrSrc,
+
+ contractStorage: contractStorage,
+
+ waiter: waiter,
+ }
+
+ for i := range opts {
+ opts[i](&s.cfg)
+ }
+
+ if s.log == nil {
+ s.log = logger.NewLoggerWrapper(zap.NewNop())
+ }
+
+ return s
+}
+
+func WithLogger(log *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = log
+ }
+}
+
+var _ Server = (*Service)(nil)
+
+// validateContainerTargetRequest validates a request for the container target.
+// It checks whether the request actor owns the container and denies the request otherwise.
+func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error {
+ var cidSDK cidSDK.ID
+ if err := cidSDK.DecodeString(cid); err != nil {
+ return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err))
+ }
+ isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey)
+ if err != nil {
+ return fmt.Errorf("failed to check owner: %w", err)
+ }
+ if !isOwner {
+ return apemanager_errors.ErrAPEManagerAccessDenied("actor must be container owner")
+ }
+ return nil
+}
+
+func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
+ pub, err := getSignaturePublicKey(req.GetVerificationHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw())
+ if err != nil {
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error())
+ }
+ if len(chain.ID) == 0 {
+ const randomIDLength = 10
+ randID, err := base58Str(randomIDLength)
+ if err != nil {
+ return nil, fmt.Errorf("randomize chain ID error: %w", err)
+ }
+ chain.ID = apechain.ID(randID)
+ }
+
+ var target policy_engine.Target
+
+ switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
+ case apeV2.TargetTypeContainer:
+ reqCID := req.GetBody().GetTarget().GetName()
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ return nil, err
+ }
+ target = policy_engine.ContainerTarget(reqCID)
+ default:
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ }
+
+ txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
+ return nil, err
+ }
+
+ body := new(apemanagerV2.AddChainResponseBody)
+ body.SetChainID(chain.ID)
+
+ resp := new(apemanagerV2.AddChainResponse)
+ resp.SetBody(body)
+
+ return resp, nil
+}
+
+func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
+ pub, err := getSignaturePublicKey(req.GetVerificationHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ var target policy_engine.Target
+
+ switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
+ case apeV2.TargetTypeContainer:
+ reqCID := req.GetBody().GetTarget().GetName()
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ return nil, err
+ }
+ target = policy_engine.ContainerTarget(reqCID)
+ default:
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ }
+
+ txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID())
+ if err != nil {
+ return nil, err
+ }
+ if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
+ return nil, err
+ }
+
+ body := new(apemanagerV2.RemoveChainResponseBody)
+
+ resp := new(apemanagerV2.RemoveChainResponse)
+ resp.SetBody(body)
+
+ return resp, nil
+}
+
+func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
+ pub, err := getSignaturePublicKey(req.GetVerificationHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ var target policy_engine.Target
+
+ switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
+ case apeV2.TargetTypeContainer:
+ reqCID := req.GetBody().GetTarget().GetName()
+ if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
+ return nil, err
+ }
+ target = policy_engine.ContainerTarget(reqCID)
+ default:
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
+ }
+
+ chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]*apeV2.Chain, 0, len(chs))
+ for _, ch := range chs {
+ v2chraw := new(apeV2.ChainRaw)
+ v2chraw.SetRaw(ch.Bytes())
+
+ v2ch := new(apeV2.Chain)
+ v2ch.SetKind(v2chraw)
+
+ res = append(res, v2ch)
+ }
+
+ body := new(apemanagerV2.ListChainsResponseBody)
+ body.SetChains(res)
+
+ resp := new(apemanagerV2.ListChainsResponse)
+ resp.SetBody(body)
+
+ return resp, nil
+}
+
+func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicKey, error) {
+ for vh.GetOrigin() != nil {
+ vh = vh.GetOrigin()
+ }
+ sig := vh.GetBodySignature()
+ if sig == nil {
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error())
+ }
+ key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
+ if err != nil {
+ return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err))
+ }
+
+ return key, nil
+}
+
+func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
+ var actor user.ID
+ user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
+ actorOwnerID := new(refs.OwnerID)
+ actor.WriteToV2(actorOwnerID)
+
+ cnr, err := s.cnrSrc.Get(ctx, cid)
+ if err != nil {
+ return false, fmt.Errorf("get container error: %w", err)
+ }
+ return cnr.Value.Owner().Equals(actor), nil
+}
+
+// base58Str generates a random base58-encoded string from n random bytes.
+func base58Str(n int) (string, error) {
+ b := make([]byte, n)
+ _, err := rand.Read(b)
+ if err != nil {
+ return "", err
+ }
+ return base58.FastBase58Encoding(b), nil
+}
diff --git a/pkg/services/apemanager/server.go b/pkg/services/apemanager/server.go
new file mode 100644
index 0000000000..e624177ac8
--- /dev/null
+++ b/pkg/services/apemanager/server.go
@@ -0,0 +1,13 @@
+package apemanager
+
+import (
+ "context"
+
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+)
+
+type Server interface {
+ AddChain(context.Context, *apemanager_v2.AddChainRequest) (*apemanager_v2.AddChainResponse, error)
+ RemoveChain(context.Context, *apemanager_v2.RemoveChainRequest) (*apemanager_v2.RemoveChainResponse, error)
+ ListChains(context.Context, *apemanager_v2.ListChainsRequest) (*apemanager_v2.ListChainsResponse, error)
+}
diff --git a/pkg/services/apemanager/sign.go b/pkg/services/apemanager/sign.go
new file mode 100644
index 0000000000..a172624fff
--- /dev/null
+++ b/pkg/services/apemanager/sign.go
@@ -0,0 +1,49 @@
+package apemanager
+
+import (
+ "context"
+ "crypto/ecdsa"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
+)
+
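+// signService wraps a Server to verify incoming request signatures
+// and sign outgoing responses.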
+type signService struct {
+ sigSvc *util.SignService
+
+ next Server
+}
+
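+// NewSignService returns a Server that checks request signatures before
+// delegating to next and signs all responses with the given key.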
+func NewSignService(key *ecdsa.PrivateKey, next Server) Server {
+ return &signService{
+ sigSvc: util.NewUnarySignService(key),
+ next: next,
+ }
+}
+
+func (s *signService) AddChain(ctx context.Context, req *apemanager_v2.AddChainRequest) (*apemanager_v2.AddChainResponse, error) {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(apemanager_v2.AddChainResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
+ }
+ resp, err := util.EnsureNonNilResponse(s.next.AddChain(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
+}
+
+func (s *signService) RemoveChain(ctx context.Context, req *apemanager_v2.RemoveChainRequest) (*apemanager_v2.RemoveChainResponse, error) {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(apemanager_v2.RemoveChainResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
+ }
+ resp, err := util.EnsureNonNilResponse(s.next.RemoveChain(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
+}
+
+func (s *signService) ListChains(ctx context.Context, req *apemanager_v2.ListChainsRequest) (*apemanager_v2.ListChainsResponse, error) {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(apemanager_v2.ListChainsResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
+ }
+ resp, err := util.EnsureNonNilResponse(s.next.ListChains(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
+}
diff --git a/pkg/services/apemanager/validation.go b/pkg/services/apemanager/validation.go
new file mode 100644
index 0000000000..b26fcf8eef
--- /dev/null
+++ b/pkg/services/apemanager/validation.go
@@ -0,0 +1,25 @@
+package apemanager
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ape"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+)
+
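+// decodeAndValidateChain decodes a binary-encoded APE chain and validates
+// the resource names referenced by its rules.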
+func decodeAndValidateChain(encodedChain []byte) (chain apechain.Chain, err error) {
+ if err = chain.DecodeBytes(encodedChain); err != nil {
+ return
+ }
+ for _, rule := range chain.Rules {
+ for _, name := range rule.Resources.Names {
+ if err = ape.ValidateResourceName(name); err != nil {
+ err = fmt.Errorf("invalid resource: %w", err)
+ return
+ }
+ }
+ }
+ return
+}
diff --git a/pkg/services/audit/auditor/context.go b/pkg/services/audit/auditor/context.go
deleted file mode 100644
index 528a3bbb51..0000000000
--- a/pkg/services/audit/auditor/context.go
+++ /dev/null
@@ -1,301 +0,0 @@
-package auditor
-
-import (
- "context"
- "sync"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/atomic"
- "go.uber.org/zap"
-)
-
-// Context represents container data audit execution context.
-type Context struct {
- ContextPrm
-
- task *audit.Task
-
- report *audit.Report
-
- sgMembersMtx sync.RWMutex
- sgMembersCache map[oid.ID][]oid.ID
-
- placementMtx sync.Mutex
- placementCache map[string][][]netmap.NodeInfo
-
- porRequests, porRetries atomic.Uint32
-
- pairs []gamePair
-
- pairedMtx sync.Mutex
- pairedNodes map[uint64]*pairMemberInfo
-
- counters struct {
- hit, miss, fail uint32
- }
-
- cnrNodesNum int
-
- headMtx sync.RWMutex
- headResponses map[string]shortHeader
-}
-
-type pairMemberInfo struct {
- failedPDP, passedPDP bool // at least one
-
- node netmap.NodeInfo
-}
-
-type gamePair struct {
- n1, n2 netmap.NodeInfo
-
- id oid.ID
-
- rn1, rn2 []*object.Range
-
- hh1, hh2 [][]byte
-}
-
-type shortHeader struct {
- tzhash []byte
-
- objectSize uint64
-}
-
-// ContextPrm groups components required to conduct data audit checks.
-type ContextPrm struct {
- maxPDPSleep uint64
-
- log *logger.Logger
-
- cnrCom ContainerCommunicator
-
- pdpWorkerPool, porWorkerPool util.WorkerPool
-}
-
-type commonCommunicatorPrm struct {
- Context context.Context
-
- Node netmap.NodeInfo
-
- OID oid.ID
- CID cid.ID
-}
-
-// GetHeaderPrm groups the parameters of the GetHeader operation.
-type GetHeaderPrm struct {
- commonCommunicatorPrm
-
- NodeIsRelay bool
-}
-
-// GetRangeHashPrm groups the parameters of the GetRangeHash operation.
-type GetRangeHashPrm struct {
- commonCommunicatorPrm
-
- Range *object.Range
-}
-
-// ContainerCommunicator is an interface of a component
-// that communicates with container nodes.
-type ContainerCommunicator interface {
- // GetHeader must return object header from the container node.
- GetHeader(GetHeaderPrm) (*object.Object, error)
-
- // GetRangeHash must return homomorphic Tillich-Zemor hash of payload range of the
- // object stored in container node.
- GetRangeHash(GetRangeHashPrm) ([]byte, error)
-}
-
-// NewContext creates, initializes and returns Context.
-func NewContext(prm ContextPrm) *Context {
- return &Context{
- ContextPrm: prm,
- }
-}
-
-// SetLogger sets logging component.
-func (p *ContextPrm) SetLogger(l *logger.Logger) {
- if p != nil {
- p.log = l
- }
-}
-
-// SetContainerCommunicator sets component of communication with container nodes.
-func (p *ContextPrm) SetContainerCommunicator(cnrCom ContainerCommunicator) {
- if p != nil {
- p.cnrCom = cnrCom
- }
-}
-
-// SetMaxPDPSleep sets the maximum sleep interval between range hash
-// requests as part of the PDP check.
-func (p *ContextPrm) SetMaxPDPSleep(dur time.Duration) {
- if p != nil {
- p.maxPDPSleep = uint64(dur)
- }
-}
-
-// WithTask sets container audit parameters.
-func (c *Context) WithTask(t *audit.Task) *Context {
- if c != nil {
- c.task = t
- }
-
- return c
-}
-
-// WithPDPWorkerPool sets worker pool for PDP pairs processing.
-func (c *Context) WithPDPWorkerPool(pool util.WorkerPool) *Context {
- if c != nil {
- c.pdpWorkerPool = pool
- }
-
- return c
-}
-
-// WithPoRWorkerPool sets worker pool for PoR SG processing.
-func (c *Context) WithPoRWorkerPool(pool util.WorkerPool) *Context {
- if c != nil {
- c.porWorkerPool = pool
- }
-
- return c
-}
-
-func (c *Context) containerID() cid.ID {
- return c.task.ContainerID()
-}
-
-func (c *Context) init() {
- c.report = audit.NewReport(c.containerID())
-
- c.sgMembersCache = make(map[oid.ID][]oid.ID)
-
- c.placementCache = make(map[string][][]netmap.NodeInfo)
-
- cnrVectors := c.task.ContainerNodes()
- for i := range cnrVectors {
- c.cnrNodesNum += len(cnrVectors[i])
- }
-
- c.pairedNodes = make(map[uint64]*pairMemberInfo)
-
- c.headResponses = make(map[string]shortHeader)
-
- c.log = &logger.Logger{Logger: c.log.With(
- zap.Stringer("container ID", c.task.ContainerID()),
- )}
-}
-
-func (c *Context) expired() bool {
- ctx := c.task.AuditContext()
-
- select {
- case <-ctx.Done():
- c.log.Debug("audit context is done",
- zap.String("error", ctx.Err().Error()),
- )
-
- return true
- default:
- return false
- }
-}
-
-func (c *Context) complete() {
- c.report.Complete()
-}
-
-func (c *Context) writeReport() {
- c.log.Debug("writing audit report...")
-
- if err := c.task.Reporter().WriteReport(c.report); err != nil {
- c.log.Error("could not write audit report")
- }
-}
-
-func (c *Context) buildPlacement(id oid.ID) ([][]netmap.NodeInfo, error) {
- c.placementMtx.Lock()
- defer c.placementMtx.Unlock()
-
- strID := id.EncodeToString()
-
- if nn, ok := c.placementCache[strID]; ok {
- return nn, nil
- }
-
- nn, err := placement.BuildObjectPlacement(
- c.task.NetworkMap(),
- c.task.ContainerNodes(),
- &id,
- )
- if err != nil {
- return nil, err
- }
-
- c.placementCache[strID] = nn
-
- return nn, nil
-}
-
-func (c *Context) objectSize(id oid.ID) uint64 {
- c.headMtx.RLock()
- defer c.headMtx.RUnlock()
-
- strID := id.EncodeToString()
-
- if hdr, ok := c.headResponses[strID]; ok {
- return hdr.objectSize
- }
-
- return 0
-}
-
-func (c *Context) objectHomoHash(id oid.ID) []byte {
- c.headMtx.RLock()
- defer c.headMtx.RUnlock()
-
- strID := id.EncodeToString()
-
- if hdr, ok := c.headResponses[strID]; ok {
- return hdr.tzhash
- }
-
- return nil
-}
-
-func (c *Context) updateHeadResponses(hdr *object.Object) {
- id, ok := hdr.ID()
- if !ok {
- return
- }
-
- strID := id.EncodeToString()
- cs, _ := hdr.PayloadHomomorphicHash()
-
- c.headMtx.Lock()
- defer c.headMtx.Unlock()
-
- if _, ok := c.headResponses[strID]; !ok {
- c.headResponses[strID] = shortHeader{
- tzhash: cs.Value(),
- objectSize: hdr.PayloadSize(),
- }
- }
-}
-
-func (c *Context) updateSGInfo(id oid.ID, members []oid.ID) {
- c.sgMembersMtx.Lock()
- defer c.sgMembersMtx.Unlock()
-
- c.sgMembersCache[id] = members
-}
diff --git a/pkg/services/audit/auditor/exec.go b/pkg/services/audit/auditor/exec.go
deleted file mode 100644
index ceb6556e25..0000000000
--- a/pkg/services/audit/auditor/exec.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package auditor
-
-import (
- "fmt"
-)
-
-// Execute audits container data.
-func (c *Context) Execute() {
- c.init()
-
- checks := []struct {
- name string
- exec func()
- }{
- {name: "PoR", exec: c.executePoR},
- {name: "PoP", exec: c.executePoP},
- {name: "PDP", exec: c.executePDP},
- }
-
- for i := range checks {
- c.log.Debug(fmt.Sprintf("executing %s check...", checks[i].name))
-
- if c.expired() {
- break
- }
-
- checks[i].exec()
-
- if i == len(checks)-1 {
- c.complete()
- }
- }
-
- c.writeReport()
-}
diff --git a/pkg/services/audit/auditor/pdp.go b/pkg/services/audit/auditor/pdp.go
deleted file mode 100644
index beb2fdcf88..0000000000
--- a/pkg/services/audit/auditor/pdp.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package auditor
-
-import (
- "bytes"
- "sync"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
- "go.uber.org/zap"
-)
-
-func (c *Context) executePDP() {
- c.processPairs()
- c.writePairsResult()
-}
-
-func (c *Context) processPairs() {
- wg := new(sync.WaitGroup)
-
- for i := range c.pairs {
- p := &c.pairs[i]
- wg.Add(1)
-
- if err := c.pdpWorkerPool.Submit(func() {
- c.processPair(p)
- wg.Done()
- }); err != nil {
- wg.Done()
- }
- }
-
- wg.Wait()
- c.pdpWorkerPool.Release()
-}
-
-func (c *Context) processPair(p *gamePair) {
- c.distributeRanges(p)
- c.collectHashes(p)
- c.analyzeHashes(p)
-}
-
-func (c *Context) distributeRanges(p *gamePair) {
- p.rn1 = make([]*object.Range, hashRangeNumber-1)
- p.rn2 = make([]*object.Range, hashRangeNumber-1)
-
- for i := 0; i < hashRangeNumber-1; i++ {
- p.rn1[i] = object.NewRange()
- p.rn2[i] = object.NewRange()
- }
-
- notches := c.splitPayload(p.id)
-
- { // node 1
- // [0:n2]
- p.rn1[0].SetLength(notches[1])
-
- // [n2:n3]
- p.rn1[1].SetOffset(notches[1])
- p.rn1[1].SetLength(notches[2] - notches[1])
-
- // [n3:full]
- p.rn1[2].SetOffset(notches[2])
- p.rn1[2].SetLength(notches[3] - notches[2])
- }
-
- { // node 2
- // [0:n1]
- p.rn2[0].SetLength(notches[0])
-
- // [n1:n2]
- p.rn2[1].SetOffset(notches[0])
- p.rn2[1].SetLength(notches[1] - notches[0])
-
- // [n2:full]
- p.rn2[2].SetOffset(notches[1])
- p.rn2[2].SetLength(notches[3] - notches[1])
- }
-}
-
-func (c *Context) splitPayload(id oid.ID) []uint64 {
- var (
- prev uint64
- size = c.objectSize(id)
- notches = make([]uint64, 0, hashRangeNumber)
- )
-
- for i := uint64(0); i < hashRangeNumber; i++ {
- if i < hashRangeNumber-1 {
- max := size - prev - (hashRangeNumber - i)
- if max == 0 {
- prev++
- } else {
- prev += rand.Uint64()%max + 1
- }
- } else {
- prev = size
- }
-
- notches = append(notches, prev)
- }
-
- return notches
-}
-
-func (c *Context) collectHashes(p *gamePair) {
- fn := func(n netmap.NodeInfo, rngs []*object.Range) [][]byte {
- // Here we randomize the order a bit: the hypothesis is that this
- // makes it harder for an unscrupulous node to come up with a
- // reliable cheating strategy.
- order := make([]int, len(rngs))
- for i := range order {
- order[i] = i
- }
- rand.Shuffle(len(order), func(i, j int) { order[i], order[j] = order[j], order[i] })
-
- var getRangeHashPrm GetRangeHashPrm
- getRangeHashPrm.Context = c.task.AuditContext()
- getRangeHashPrm.CID = c.task.ContainerID()
- getRangeHashPrm.OID = p.id
- getRangeHashPrm.Node = n
-
- res := make([][]byte, len(rngs))
- for _, i := range order {
- var sleepDur time.Duration
- if c.maxPDPSleep > 0 {
- sleepDur = time.Duration(rand.Uint64() % c.maxPDPSleep)
- }
-
- c.log.Debug("sleep before get range hash",
- zap.Stringer("interval", sleepDur),
- )
-
- time.Sleep(sleepDur)
-
- getRangeHashPrm.Range = rngs[i]
-
- h, err := c.cnrCom.GetRangeHash(getRangeHashPrm)
- if err != nil {
- c.log.Debug("could not get payload range hash",
- zap.Stringer("id", p.id),
- zap.String("node", netmap.StringifyPublicKey(n)),
- zap.String("error", err.Error()),
- )
- return res
- }
- res[i] = h
- }
- return res
- }
-
- p.hh1 = fn(p.n1, p.rn1)
- p.hh2 = fn(p.n2, p.rn2)
-}
-
-func (c *Context) analyzeHashes(p *gamePair) {
- if len(p.hh1) != hashRangeNumber-1 || len(p.hh2) != hashRangeNumber-1 {
- c.failNodesPDP(p.n1, p.n2)
- return
- }
-
- h1, err := tz.Concat([][]byte{p.hh2[0], p.hh2[1]})
- if err != nil || !bytes.Equal(p.hh1[0], h1) {
- c.failNodesPDP(p.n1, p.n2)
- return
- }
-
- h2, err := tz.Concat([][]byte{p.hh1[1], p.hh1[2]})
- if err != nil || !bytes.Equal(p.hh2[2], h2) {
- c.failNodesPDP(p.n1, p.n2)
- return
- }
-
- fh, err := tz.Concat([][]byte{h1, h2})
- if err != nil || !bytes.Equal(fh, c.objectHomoHash(p.id)) {
- c.failNodesPDP(p.n1, p.n2)
- return
- }
-
- c.passNodesPDP(p.n1, p.n2)
-}
-
-func (c *Context) failNodesPDP(ns ...netmap.NodeInfo) {
- c.pairedMtx.Lock()
-
- for i := range ns {
- c.pairedNodes[ns[i].Hash()].failedPDP = true
- }
-
- c.pairedMtx.Unlock()
-}
-
-func (c *Context) passNodesPDP(ns ...netmap.NodeInfo) {
- c.pairedMtx.Lock()
-
- for i := range ns {
- c.pairedNodes[ns[i].Hash()].passedPDP = true
- }
-
- c.pairedMtx.Unlock()
-}
-
-func (c *Context) writePairsResult() {
- var failCount, okCount int
-
- c.iteratePairedNodes(
- func(netmap.NodeInfo) { failCount++ },
- func(netmap.NodeInfo) { okCount++ },
- )
-
- failedNodes := make([][]byte, 0, failCount)
- passedNodes := make([][]byte, 0, okCount)
-
- c.iteratePairedNodes(
- func(n netmap.NodeInfo) {
- failedNodes = append(failedNodes, n.PublicKey())
- },
- func(n netmap.NodeInfo) {
- passedNodes = append(passedNodes, n.PublicKey())
- },
- )
-
- c.report.SetPDPResults(passedNodes, failedNodes)
-}
-
-func (c *Context) iteratePairedNodes(onFail, onPass func(netmap.NodeInfo)) {
- for _, pairedNode := range c.pairedNodes {
- if pairedNode.failedPDP {
- onFail(pairedNode.node)
- }
-
- if pairedNode.passedPDP {
- onPass(pairedNode.node)
- }
- }
-}
diff --git a/pkg/services/audit/auditor/pop.go b/pkg/services/audit/auditor/pop.go
deleted file mode 100644
index f8a16cb0a4..0000000000
--- a/pkg/services/audit/auditor/pop.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package auditor
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
- "go.uber.org/zap"
-)
-
-const (
- hashRangeNumber = 4
- minGamePayloadSize = hashRangeNumber * tz.Size
-)
-
-func (c *Context) executePoP() {
- c.buildCoverage()
-
- c.report.SetPlacementCounters(
- c.counters.hit,
- c.counters.miss,
- c.counters.fail,
- )
-}
-
-func (c *Context) buildCoverage() {
- policy := c.task.ContainerStructure().PlacementPolicy()
-
- // select random member from another storage group
- // and process all placement vectors
- c.iterateSGMembersPlacementRand(func(id oid.ID, ind int, nodes []netmap.NodeInfo) bool {
- c.processObjectPlacement(id, nodes, policy.ReplicaNumberByIndex(ind))
- return c.containerCovered()
- })
-}
-
-func (c *Context) containerCovered() bool {
- // number of container nodes can be calculated once
- return c.cnrNodesNum <= len(c.pairedNodes)
-}
-
-func (c *Context) processObjectPlacement(id oid.ID, nodes []netmap.NodeInfo, replicas uint32) {
- var (
- ok uint32
- optimal bool
-
- unpairedCandidate1, unpairedCandidate2 = -1, -1
-
- pairedCandidate = -1
- )
-
- var getHeaderPrm GetHeaderPrm
- getHeaderPrm.Context = c.task.AuditContext()
- getHeaderPrm.OID = id
- getHeaderPrm.CID = c.task.ContainerID()
- getHeaderPrm.NodeIsRelay = false
-
- for i := 0; ok < replicas && i < len(nodes); i++ {
- getHeaderPrm.Node = nodes[i]
-
- // try to get object header from node
- hdr, err := c.cnrCom.GetHeader(getHeaderPrm)
- if err != nil {
- c.log.Debug("could not get object header from candidate",
- zap.Stringer("id", id),
- zap.String("error", err.Error()),
- )
-
- continue
- }
-
- c.updateHeadResponses(hdr)
-
- // increment success counter
- ok++
-
- // update optimal flag
- optimal = ok == replicas && uint32(i) < replicas
-
- // exclude small objects from coverage
- if c.objectSize(id) < minGamePayloadSize {
- continue
- }
-
- // update potential candidates to be paired
- if _, ok := c.pairedNodes[nodes[i].Hash()]; !ok {
- if unpairedCandidate1 < 0 {
- unpairedCandidate1 = i
- } else if unpairedCandidate2 < 0 {
- unpairedCandidate2 = i
- }
- } else if pairedCandidate < 0 {
- pairedCandidate = i
- }
- }
-
- if optimal {
- c.counters.hit++
- } else if ok == replicas {
- c.counters.miss++
- } else {
- c.counters.fail++
- }
-
- if unpairedCandidate1 >= 0 {
- if unpairedCandidate2 >= 0 {
- c.composePair(id, nodes[unpairedCandidate1], nodes[unpairedCandidate2])
- } else if pairedCandidate >= 0 {
- c.composePair(id, nodes[unpairedCandidate1], nodes[pairedCandidate])
- }
- }
-}
-
-func (c *Context) composePair(id oid.ID, n1, n2 netmap.NodeInfo) {
- c.pairs = append(c.pairs, gamePair{
- n1: n1,
- n2: n2,
- id: id,
- })
-
- c.pairedNodes[n1.Hash()] = &pairMemberInfo{
- node: n1,
- }
- c.pairedNodes[n2.Hash()] = &pairMemberInfo{
- node: n2,
- }
-}
-
-func (c *Context) iterateSGMembersPlacementRand(f func(oid.ID, int, []netmap.NodeInfo) bool) {
- // iterate over storage groups members for all storage groups (one by one)
- // with randomly shuffled members
- c.iterateSGMembersRand(func(id oid.ID) bool {
- // build placement vector for the current object
- nn, err := c.buildPlacement(id)
- if err != nil {
- c.log.Debug("could not build placement for object",
- zap.Stringer("id", id),
- zap.String("error", err.Error()),
- )
-
- return false
- }
-
- for i, nodes := range nn {
- if f(id, i, nodes) {
- return true
- }
- }
-
- return false
- })
-}
-
-func (c *Context) iterateSGMembersRand(f func(oid.ID) bool) {
- c.iterateSGInfo(func(members []oid.ID) bool {
- ln := len(members)
-
- processed := make(map[uint64]struct{}, ln-1)
-
- for len(processed) < ln {
- ind := nextRandUint64(uint64(ln), processed)
- processed[ind] = struct{}{}
-
- if f(members[ind]) {
- return true
- }
- }
-
- return false
- })
-}
-
-func (c *Context) iterateSGInfo(f func([]oid.ID) bool) {
- c.sgMembersMtx.RLock()
- defer c.sgMembersMtx.RUnlock()
-
- // we can add randomization like for SG members,
- // but list of storage groups is already expected
- // to be shuffled since it is a Search response
- // with unpredictable order
- for i := range c.sgMembersCache {
- if f(c.sgMembersCache[i]) {
- return
- }
- }
-}
diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go
deleted file mode 100644
index 432826acff..0000000000
--- a/pkg/services/audit/auditor/por.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package auditor
-
-import (
- "bytes"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- storagegroupSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
- "go.uber.org/zap"
-)
-
-func (c *Context) executePoR() {
- wg := new(sync.WaitGroup)
- sgs := c.task.StorageGroupList()
-
- for _, sg := range sgs {
- wg.Add(1)
-
- if err := c.porWorkerPool.Submit(func() {
- c.checkStorageGroupPoR(sg.ID(), sg.StorageGroup())
- wg.Done()
- }); err != nil {
- wg.Done()
- }
- }
-
- wg.Wait()
- c.porWorkerPool.Release()
-
- c.report.SetPoRCounters(c.porRequests.Load(), c.porRetries.Load())
-}
-
-func (c *Context) checkStorageGroupPoR(sgID oid.ID, sg storagegroupSDK.StorageGroup) {
- members := sg.Members()
- c.updateSGInfo(sgID, members)
-
- var (
- tzHash []byte
- totalSize uint64
-
- accRequests, accRetries uint32
- )
-
- var getHeaderPrm GetHeaderPrm
- getHeaderPrm.Context = c.task.AuditContext()
- getHeaderPrm.CID = c.task.ContainerID()
- getHeaderPrm.NodeIsRelay = true
-
- homomorphicHashingEnabled := !containerSDK.IsHomomorphicHashingDisabled(c.task.ContainerStructure())
-
- for i := range members {
- objectPlacement, err := c.buildPlacement(members[i])
- if err != nil {
- c.log.Info("can't build placement for storage group member",
- zap.Stringer("sg", sgID),
- zap.String("member_id", members[i].String()),
- )
-
- continue
- }
-
- flat := placement.FlattenNodes(objectPlacement)
-
- rand.Shuffle(len(flat), func(i, j int) {
- flat[i], flat[j] = flat[j], flat[i]
- })
-
- getHeaderPrm.OID = members[i]
-
- for j := range flat {
- accRequests++
- if j > 0 { // in the best case, the audit gets the object header on the first iteration
- accRetries++
- }
-
- getHeaderPrm.Node = flat[j]
-
- hdr, err := c.cnrCom.GetHeader(getHeaderPrm)
- if err != nil {
- c.log.Debug("can't head object",
- zap.String("remote_node", netmap.StringifyPublicKey(flat[j])),
- zap.Stringer("oid", members[i]),
- )
-
- continue
- }
-
- // update cache for PoR and PDP audit checks
- c.updateHeadResponses(hdr)
-
- if homomorphicHashingEnabled {
- cs, _ := hdr.PayloadHomomorphicHash()
- if len(tzHash) == 0 {
- tzHash = cs.Value()
- } else {
- tzHash, err = tz.Concat([][]byte{
- tzHash,
- cs.Value(),
- })
- if err != nil {
- c.log.Debug("can't concatenate tz hash",
- zap.String("oid", members[i].String()),
- zap.String("error", err.Error()))
-
- break
- }
- }
- }
-
- totalSize += hdr.PayloadSize()
-
- break
- }
- }
-
- c.porRequests.Add(accRequests)
- c.porRetries.Add(accRetries)
-
- sizeCheck := sg.ValidationDataSize() == totalSize
- cs, _ := sg.ValidationDataHash()
- tzCheck := !homomorphicHashingEnabled || bytes.Equal(tzHash, cs.Value())
-
- if sizeCheck && tzCheck {
- c.report.PassedPoR(sgID) // write report
- } else {
- if !sizeCheck {
- c.log.Debug("storage group size check failed",
- zap.Uint64("expected", sg.ValidationDataSize()),
- zap.Uint64("got", totalSize))
- }
-
- if !tzCheck {
- c.log.Debug("storage group tz hash check failed")
- }
-
- c.report.FailedPoR(sgID) // write report
- }
-}
diff --git a/pkg/services/audit/auditor/util.go b/pkg/services/audit/auditor/util.go
deleted file mode 100644
index 5f86855347..0000000000
--- a/pkg/services/audit/auditor/util.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package auditor
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
-)
-
-// nextRandUint64 returns a random uint64 from [0; n) that is not in the exclude map.
-// Panics if len(exclude) >= n.
-func nextRandUint64(n uint64, exclude map[uint64]struct{}) uint64 {
- ln := uint64(len(exclude))
- ind := rand.Uint64() % (n - ln)
-
- for i := ind; ; i++ {
- if _, ok := exclude[i]; !ok {
- return i
- }
- }
-}
diff --git a/pkg/services/audit/report.go b/pkg/services/audit/report.go
deleted file mode 100644
index f16f973842..0000000000
--- a/pkg/services/audit/report.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package audit
-
-import (
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// Report tracks the progress of auditing container data.
-type Report struct {
- mu sync.RWMutex
- res audit.Result
-}
-
-// Reporter is an interface of the entity that records
-// the data audit report.
-type Reporter interface {
- WriteReport(r *Report) error
-}
-
-// NewReport creates and returns blank Report instance.
-func NewReport(cnr cid.ID) *Report {
- var rep Report
- rep.res.ForContainer(cnr)
-
- return &rep
-}
-
-// Result forms the structure of the data audit result.
-func (r *Report) Result() *audit.Result {
- r.mu.RLock()
- defer r.mu.RUnlock()
-
- return &r.res
-}
-
-// Complete completes audit report.
-func (r *Report) Complete() {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.res.Complete()
-}
-
-// PassedPoR updates list of passed storage groups.
-func (r *Report) PassedPoR(sg oid.ID) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.res.SubmitPassedStorageGroup(sg)
-}
-
-// FailedPoR updates list of failed storage groups.
-func (r *Report) FailedPoR(sg oid.ID) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.res.SubmitFailedStorageGroup(sg)
-}
-
-// SetPlacementCounters sets counters of compliance with placement.
-func (r *Report) SetPlacementCounters(hit, miss, fail uint32) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.res.SetHits(hit)
- r.res.SetMisses(miss)
- r.res.SetFailures(fail)
-}
-
-// SetPDPResults sets lists of nodes according to their PDP results.
-func (r *Report) SetPDPResults(passed, failed [][]byte) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.res.SubmitPassedStorageNodes(passed)
- r.res.SubmitFailedStorageNodes(failed)
-}
-
-// SetPoRCounters sets amounts of head requests and retries at PoR audit stage.
-func (r *Report) SetPoRCounters(requests, retries uint32) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.res.SetRequestsPoR(requests)
- r.res.SetRetriesPoR(retries)
-}
diff --git a/pkg/services/audit/task.go b/pkg/services/audit/task.go
deleted file mode 100644
index 554aab6c4b..0000000000
--- a/pkg/services/audit/task.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package audit
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-// Task groups the container audit parameters.
-type Task struct {
- reporter Reporter
-
- auditContext context.Context
-
- idCnr cid.ID
-
- cnr container.Container
-
- nm *netmap.NetMap
-
- cnrNodes [][]netmap.NodeInfo
-
- sgList []storagegroup.StorageGroup
-}
-
-// WithReporter sets audit report writer.
-func (t *Task) WithReporter(r Reporter) *Task {
- if t != nil {
- t.reporter = r
- }
-
- return t
-}
-
-// Reporter returns audit report writer.
-func (t *Task) Reporter() Reporter {
- return t.reporter
-}
-
-// WithAuditContext sets context of the audit of the current epoch.
-func (t *Task) WithAuditContext(ctx context.Context) *Task {
- if t != nil {
- t.auditContext = ctx
- }
-
- return t
-}
-
-// AuditContext returns context of the audit of the current epoch.
-func (t *Task) AuditContext() context.Context {
- return t.auditContext
-}
-
-// WithContainerID sets identifier of the container under audit.
-func (t *Task) WithContainerID(cnr cid.ID) *Task {
- if t != nil {
- t.idCnr = cnr
- }
-
- return t
-}
-
-// ContainerID returns identifier of the container under audit.
-func (t *Task) ContainerID() cid.ID {
- return t.idCnr
-}
-
-// WithContainerStructure sets structure of the container under audit.
-func (t *Task) WithContainerStructure(cnr container.Container) *Task {
- if t != nil {
- t.cnr = cnr
- }
-
- return t
-}
-
-// ContainerStructure returns structure of the container under audit.
-func (t *Task) ContainerStructure() container.Container {
- return t.cnr
-}
-
-// WithContainerNodes sets nodes in the container under audit.
-func (t *Task) WithContainerNodes(cnrNodes [][]netmap.NodeInfo) *Task {
- if t != nil {
- t.cnrNodes = cnrNodes
- }
-
- return t
-}
-
-// NetworkMap returns network map of audit epoch.
-func (t *Task) NetworkMap() *netmap.NetMap {
- return t.nm
-}
-
-// WithNetworkMap sets network map of audit epoch.
-func (t *Task) WithNetworkMap(nm *netmap.NetMap) *Task {
- if t != nil {
- t.nm = nm
- }
-
- return t
-}
-
-// ContainerNodes returns nodes in the container under audit.
-func (t *Task) ContainerNodes() [][]netmap.NodeInfo {
- return t.cnrNodes
-}
-
-// WithStorageGroupList sets a list of storage groups from container under audit.
-func (t *Task) WithStorageGroupList(sgList []storagegroup.StorageGroup) *Task {
- if t != nil {
- t.sgList = sgList
- }
-
- return t
-}
-
-// StorageGroupList returns list of storage groups from container under audit.
-func (t *Task) StorageGroupList() []storagegroup.StorageGroup {
- return t.sgList
-}
diff --git a/pkg/services/audit/taskmanager/listen.go b/pkg/services/audit/taskmanager/listen.go
deleted file mode 100644
index 4e8a3df683..0000000000
--- a/pkg/services/audit/taskmanager/listen.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package audittask
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor"
- "go.uber.org/zap"
-)
-
-// Listen starts processing tasks from the queue.
-//
-// The listener is terminated by context.
-func (m *Manager) Listen(ctx context.Context) {
- m.log.Info("process routine",
- zap.Uint32("queue_capacity", m.queueCap),
- )
-
- m.ch = make(chan *audit.Task, m.queueCap)
-
- for {
- select {
- case <-ctx.Done():
- m.log.Warn("stop listener by context",
- zap.String("error", ctx.Err().Error()),
- )
- m.workerPool.Release()
-
- return
- case task, ok := <-m.ch:
- if !ok {
- m.log.Warn("queue channel is closed")
- return
- }
-
- m.handleTask(task)
- }
- }
-}
-
-func (m *Manager) handleTask(task *audit.Task) {
- pdpPool, err := m.pdpPoolGenerator()
- if err != nil {
- m.log.Error("could not generate PDP worker pool",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- porPool, err := m.pdpPoolGenerator()
- if err != nil {
- m.log.Error("could not generate PoR worker pool",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- auditContext := m.generateContext(task).
- WithPDPWorkerPool(pdpPool).
- WithPoRWorkerPool(porPool)
-
- if err := m.workerPool.Submit(auditContext.Execute); err != nil {
- // maybe we should report it
- m.log.Warn("could not submit audit task")
- }
-}
-
-func (m *Manager) generateContext(task *audit.Task) *auditor.Context {
- return auditor.NewContext(m.ctxPrm).
- WithTask(task)
-}
diff --git a/pkg/services/audit/taskmanager/manager.go b/pkg/services/audit/taskmanager/manager.go
deleted file mode 100644
index bf76987990..0000000000
--- a/pkg/services/audit/taskmanager/manager.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package audittask
-
-import (
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Manager represents an entity performing data audit tasks.
-type Manager struct {
- *cfg
-
- ch chan *audit.Task
-}
-
-// Option is a Manager's constructor option.
-type Option func(*cfg)
-
-type cfg struct {
- queueCap uint32
-
- log *logger.Logger
-
- ctxPrm auditor.ContextPrm
-
- workerPool util.WorkerPool
-
- pdpPoolGenerator, porPoolGenerator func() (util.WorkerPool, error)
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// New creates, initializes and returns new Manager instance.
-func New(opts ...Option) *Manager {
- c := defaultCfg()
-
- for i := range opts {
- opts[i](c)
- }
-
- return &Manager{
- cfg: c,
- }
-}
-
-// WithLogger returns option to specify Manager's logger.
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Audit task manager"))}
- c.ctxPrm.SetLogger(l)
- }
-}
-
-// WithWorkerPool returns option to set worker pool
-// for task execution.
-func WithWorkerPool(p util.WorkerPool) Option {
- return func(c *cfg) {
- c.workerPool = p
- }
-}
-
-// WithQueueCapacity returns option to set task queue capacity.
-func WithQueueCapacity(capacity uint32) Option {
- return func(c *cfg) {
- c.queueCap = capacity
- }
-}
-
-// WithContainerCommunicator returns option to set component of communication
-// with container nodes.
-func WithContainerCommunicator(cnrCom auditor.ContainerCommunicator) Option {
- return func(c *cfg) {
- c.ctxPrm.SetContainerCommunicator(cnrCom)
- }
-}
-
-// WithMaxPDPSleepInterval returns option to set maximum sleep interval
-// between range hash requests as part of PDP check.
-func WithMaxPDPSleepInterval(dur time.Duration) Option {
- return func(c *cfg) {
- c.ctxPrm.SetMaxPDPSleep(dur)
- }
-}
-
-// WithPDPWorkerPoolGenerator returns option to set worker pool for PDP pairs processing.
-// Callback caller owns returned pool and must release it appropriately.
-func WithPDPWorkerPoolGenerator(f func() (util.WorkerPool, error)) Option {
- return func(c *cfg) {
- c.pdpPoolGenerator = f
- }
-}
-
-// WithPoRWorkerPoolGenerator returns option to set worker pool for PoR SG processing.
-// Callback caller owns returned pool and must release it appropriately.
-func WithPoRWorkerPoolGenerator(f func() (util.WorkerPool, error)) Option {
- return func(c *cfg) {
- c.porPoolGenerator = f
- }
-}
diff --git a/pkg/services/audit/taskmanager/push.go b/pkg/services/audit/taskmanager/push.go
deleted file mode 100644
index 13f8fd12db..0000000000
--- a/pkg/services/audit/taskmanager/push.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package audittask
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
-)
-
-// PushTask adds a task to the queue for processing.
-//
-// Returns error if task was not added to the queue.
-func (m *Manager) PushTask(t *audit.Task) error {
- m.ch <- t
- return nil
-}
diff --git a/pkg/services/audit/taskmanager/reset.go b/pkg/services/audit/taskmanager/reset.go
deleted file mode 100644
index 86f2538cf7..0000000000
--- a/pkg/services/audit/taskmanager/reset.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package audittask
-
-// Reset pops all tasks from the queue.
-// Returns the number of popped elements.
-func (m *Manager) Reset() (popped int) {
- for ; len(m.ch) > 0; popped++ {
- <-m.ch
- }
-
- return
-}
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
new file mode 100644
index 0000000000..ac15dd1073
--- /dev/null
+++ b/pkg/services/common/ape/checker.go
@@ -0,0 +1,174 @@
+package ape
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+
+ aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+var (
+ errBearerExpired = errors.New("bearer token has expired")
+ errBearerInvalidSignature = errors.New("bearer token has invalid signature")
+ errBearerInvalidContainerID = errors.New("bearer token was created for another container")
+ errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
+ errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
+)
+
+type CheckPrm struct {
+ // Request is an APE-request that is checked by policy engine.
+ Request aperequest.Request
+
+ Namespace string
+
+ Container cid.ID
+
+ // ContainerOwner is the user ID of the container's owner.
+ ContainerOwner user.ID
+
+ // PublicKey is the public key of the request sender.
+ PublicKey *keys.PublicKey
+
+ // BearerToken is the request's bearer token. It is used to check APE overrides carried with the token.
+ BearerToken *bearer.Token
+}
+
+// CheckCore provides methods to perform the common logic of APE check.
+type CheckCore interface {
+ // CheckAPE performs the common policy-engine check logic on a prepared request.
+ CheckAPE(ctx context.Context, prm CheckPrm) error
+}
+
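+// checkerCoreImpl is the default CheckCore implementation.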
+type checkerCoreImpl struct {
+ LocalOverrideStorage policyengine.LocalOverrideStorage
+ MorphChainStorage policyengine.MorphRuleChainStorageReader
+ FrostFSSubjectProvider frostfsidcore.SubjectProvider
+ State netmap.State
+}
+
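+// New returns a CheckCore backed by the given chain storages, FrostFS ID
+// subject provider and netmap state.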
+func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader,
+ frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State,
+) CheckCore {
+ return &checkerCoreImpl{
+ LocalOverrideStorage: localOverrideStorage,
+ MorphChainStorage: morphChainStorage,
+ FrostFSSubjectProvider: frostFSSubjectProvider,
+ State: state,
+ }
+}
+
+// CheckAPE performs the common policy-engine check logic on a prepared request.
+func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
+ var cr policyengine.ChainRouter
+ if prm.BearerToken != nil {
+ var err error
+ if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
+ return fmt.Errorf("bearer validation error: %w", err)
+ }
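+ // Impersonated tokens skip bearer override routing and use the default chain router.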
+ if prm.BearerToken.Impersonate() {
+ cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
+ } else {
+ override, isSet := prm.BearerToken.APEOverride()
+ if !isSet {
+ return errors.New("expected for override within bearer")
+ }
+ cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override)
+ if err != nil {
+ return fmt.Errorf("create chain router error: %w", err)
+ }
+ }
+ } else {
+ cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
+ }
+
+ groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey)
+ if err != nil {
+ return fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // The policy contract keeps group-related chains keyed by namespace:group pairs.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, prm.PublicKey.Address()), groups)
+ status, found, err := cr.IsAllowed(apechain.Ingress, rt, prm.Request)
+ if err != nil {
+ return err
+ }
+ if found && status == apechain.Allow {
+ return nil
+ }
+ return newChainRouterError(prm.Request.Operation(), status)
+}
+
+// isValidBearer checks whether the bearer token was correctly signed by an
+// authorized entity. It takes the netmap state because checking the token
+// lifetime requires fetching the current epoch.
+func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
+ if token == nil {
+ return nil
+ }
+
+ // First check token lifetime. Simplest verification.
+ if token.InvalidAt(st.CurrentEpoch()) {
+ return errBearerExpired
+ }
+
+ // Then check if bearer token is signed correctly.
+ if !token.VerifySignature() {
+ return errBearerInvalidSignature
+ }
+
+ // Check for ape overrides defined in the bearer token.
+ if apeOverride, isSet := token.APEOverride(); isSet {
+ switch apeOverride.Target.TargetType {
+ case ape.TargetTypeContainer:
+ var targetCnr cid.ID
+ err := targetCnr.DecodeString(apeOverride.Target.Name)
+ if err != nil {
+ return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
+ }
+ if !cntID.Equals(targetCnr) {
+ return errBearerInvalidContainerID
+ }
+ default:
+ }
+ }
+
+ // Ignore verification checks if token is impersonated.
+ if token.Impersonate() {
+ return nil
+ }
+
+ // Then check if container owner signed this token.
+ if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
+ return errBearerNotSignedByOwner
+ }
+
+ // Then check if request sender has rights to use this token.
+ var usrSender user.ID
+ user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
+
+ if !token.AssertUser(usrSender) {
+ return errBearerInvalidOwner
+ }
+
+ return nil
+}
diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go
new file mode 100644
index 0000000000..d3c381de7b
--- /dev/null
+++ b/pkg/services/common/ape/error.go
@@ -0,0 +1,33 @@
+package ape
+
+import (
+ "fmt"
+
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+)
+
+// ChainRouterError is returned when chain router validation prevents
+// the APE request from being processed (no rule found, access denied, etc.).
+type ChainRouterError struct {
+ operation string
+ status apechain.Status
+}
+
+func (e *ChainRouterError) Error() string {
+ return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status())
+}
+
+func (e *ChainRouterError) Operation() string {
+ return e.operation
+}
+
+func (e *ChainRouterError) Status() apechain.Status {
+ return e.status
+}
+
+func newChainRouterError(operation string, status apechain.Status) *ChainRouterError {
+ return &ChainRouterError{
+ operation: operation,
+ status: status,
+ }
+}
diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go
deleted file mode 100644
index 54eb7e07ce..0000000000
--- a/pkg/services/container/announcement/load/controller/calls.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package loadcontroller
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "go.uber.org/zap"
-)
-
-// StartPrm groups the required parameters of the Controller.Start method.
-type StartPrm struct {
- // Epoch number by which you want to select
- // the values of the used space of containers.
- Epoch uint64
-}
-
-type commonContext struct {
- epoch uint64
-
- ctrl *Controller
-
- log *logger.Logger
-
- ctx context.Context
-}
-
-type announceContext struct {
- commonContext
-}
-
-// Start starts the processing of container.SizeEstimation values.
-//
-// A single Start operation transfers all data from LocalMetrics to
-// LocalAnnouncementTarget (Controller's parameters).
-// No filter by epoch is used for the iterator, since it is expected
-// that the source of metrics does not track the change of epochs.
-//
-// Each call acquires an announcement context for an Epoch parameter.
-// At the very end of the operation, the context is released.
-func (c *Controller) Start(prm StartPrm) {
- // acquire announcement
- execCtx := c.acquireAnnouncement(prm)
- if execCtx == nil {
- return
- }
-
- // finally stop and free the announcement
- defer execCtx.freeAnnouncement()
-
- // announce local values
- execCtx.announce()
-}
-
-func (c *announceContext) announce() {
- c.log.Debug("starting to announce the values of the metrics")
-
- var (
- metricsIterator Iterator
- err error
- )
-
- // initialize iterator over locally collected metrics
- metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator(c.ctx)
- if err != nil {
- c.log.Debug("could not initialize iterator over locally collected metrics",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // initialize target of local announcements
- targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(c.ctx)
- if err != nil {
- c.log.Debug("could not initialize announcement accumulator",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // iterate over all collected metrics and write them to the target
- err = metricsIterator.Iterate(
- func(container.SizeEstimation) bool {
- return true // local metrics don't know about epochs
- },
- func(a container.SizeEstimation) error {
- a.SetEpoch(c.epoch) // set epoch explicitly
- return targetWriter.Put(a)
- },
- )
- if err != nil {
- c.log.Debug("iterator over locally collected metrics aborted",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // finish writing
- err = targetWriter.Close()
- if err != nil {
- c.log.Debug("could not finish writing local announcements",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- c.log.Debug("trust announcement successfully finished")
-}
-
-func (c *Controller) acquireAnnouncement(prm StartPrm) *announceContext {
- var ctx context.Context
-
- c.announceMtx.Lock()
-
- {
- if cancel := c.mAnnounceCtx[prm.Epoch]; cancel == nil {
- ctx, cancel = context.WithCancel(context.Background())
- c.mAnnounceCtx[prm.Epoch] = cancel
- }
- }
-
- c.announceMtx.Unlock()
-
- log := &logger.Logger{Logger: c.opts.log.With(
- zap.Uint64("epoch", prm.Epoch),
- )}
-
- if ctx == nil {
- log.Debug("announcement is already started")
- return nil
- }
-
- return &announceContext{
- commonContext: commonContext{
- epoch: prm.Epoch,
- ctrl: c,
- log: log,
- ctx: ctx,
- },
- }
-}
-
-func (c *commonContext) freeAnnouncement() {
- var stopped bool
-
- c.ctrl.announceMtx.Lock()
-
- {
- var cancel context.CancelFunc
-
- cancel, stopped = c.ctrl.mAnnounceCtx[c.epoch]
-
- if stopped {
- cancel()
- delete(c.ctrl.mAnnounceCtx, c.epoch)
- }
- }
-
- c.ctrl.announceMtx.Unlock()
-
- if stopped {
- c.log.Debug("announcement successfully interrupted")
- } else {
- c.log.Debug("announcement is not started or already interrupted")
- }
-}
-
-// StopPrm groups the required parameters of the Controller.Stop method.
-type StopPrm struct {
- // Epoch number the analysis of the values of which must be interrupted.
- Epoch uint64
-}
-
-type stopContext struct {
- commonContext
-}
-
-// Stop interrupts the processing of container.SizeEstimation values.
-//
-// A single Stop operation releases an announcement context and transfers
-// all data from AnnouncementAccumulator to ResultReceiver (Controller's
-// parameters). Only values for the specified Epoch parameter are processed.
-//
-// Each call acquires a report context for an Epoch parameter.
-// At the very end of the operation, the context is released.
-func (c *Controller) Stop(prm StopPrm) {
- execCtx := c.acquireReport(prm)
- if execCtx == nil {
- return
- }
-
- // finally stop and free reporting
- defer execCtx.freeReport()
-
- // interrupt announcement
- execCtx.freeAnnouncement()
-
- // report the estimations
- execCtx.report()
-}
-
-func (c *Controller) acquireReport(prm StopPrm) *stopContext {
- var ctx context.Context
-
- c.reportMtx.Lock()
-
- {
- if cancel := c.mReportCtx[prm.Epoch]; cancel == nil {
- ctx, cancel = context.WithCancel(context.Background())
- c.mReportCtx[prm.Epoch] = cancel
- }
- }
-
- c.reportMtx.Unlock()
-
- log := &logger.Logger{Logger: c.opts.log.With(
- zap.Uint64("epoch", prm.Epoch),
- )}
-
- if ctx == nil {
- log.Debug("report is already started")
- return nil
- }
-
- return &stopContext{
- commonContext: commonContext{
- epoch: prm.Epoch,
- ctrl: c,
- log: log,
- },
- }
-}
-
-func (c *commonContext) freeReport() {
- var stopped bool
-
- c.ctrl.reportMtx.Lock()
-
- {
- var cancel context.CancelFunc
-
- cancel, stopped = c.ctrl.mReportCtx[c.epoch]
-
- if stopped {
- cancel()
- delete(c.ctrl.mReportCtx, c.epoch)
- }
- }
-
- c.ctrl.reportMtx.Unlock()
-
- if stopped {
- c.log.Debug("announcement successfully interrupted")
- } else {
- c.log.Debug("announcement is not started or already interrupted")
- }
-}
-
-func (c *stopContext) report() {
- var (
- localIterator Iterator
- err error
- )
-
- // initialize iterator over locally accumulated announcements
- localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator(c.ctx)
- if err != nil {
- c.log.Debug("could not initialize iterator over locally accumulated announcements",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // initialize final destination of load estimations
- resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(c.ctx)
- if err != nil {
- c.log.Debug("could not initialize result target",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // iterate over all accumulated announcements and write them to the target
- err = localIterator.Iterate(
- usedSpaceFilterEpochEQ(c.epoch),
- resultWriter.Put,
- )
- if err != nil {
- c.log.Debug("iterator over local announcements aborted",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // finish writing
- err = resultWriter.Close()
- if err != nil {
- c.log.Debug("could not finish writing load estimations",
- zap.String("error", err.Error()),
- )
- }
-}
diff --git a/pkg/services/container/announcement/load/controller/calls_test.go b/pkg/services/container/announcement/load/controller/calls_test.go
deleted file mode 100644
index 1737921a24..0000000000
--- a/pkg/services/container/announcement/load/controller/calls_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package loadcontroller_test
-
-import (
- "context"
- "math/rand"
- "sync"
- "testing"
-
- loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/stretchr/testify/require"
-)
-
-type testAnnouncementStorage struct {
- w loadcontroller.Writer
-
- i loadcontroller.Iterator
-
- mtx sync.RWMutex
-
- m map[uint64][]container.SizeEstimation
-}
-
-func newTestStorage() *testAnnouncementStorage {
- return &testAnnouncementStorage{
- m: make(map[uint64][]container.SizeEstimation),
- }
-}
-
-func (s *testAnnouncementStorage) InitIterator(context.Context) (loadcontroller.Iterator, error) {
- if s.i != nil {
- return s.i, nil
- }
-
- return s, nil
-}
-
-func (s *testAnnouncementStorage) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontroller.UsedSpaceHandler) error {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
-
- for _, v := range s.m {
- for _, a := range v {
- if f(a) {
- if err := h(a); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-func (s *testAnnouncementStorage) InitWriter(context.Context) (loadcontroller.Writer, error) {
- if s.w != nil {
- return s.w, nil
- }
-
- return s, nil
-}
-
-func (s *testAnnouncementStorage) Put(v container.SizeEstimation) error {
- s.mtx.Lock()
- s.m[v.Epoch()] = append(s.m[v.Epoch()], v)
- s.mtx.Unlock()
-
- return nil
-}
-
-func (s *testAnnouncementStorage) Close() error {
- return nil
-}
-
-func randAnnouncement() (a container.SizeEstimation) {
- a.SetContainer(cidtest.ID())
- a.SetValue(rand.Uint64())
-
- return
-}
-
-func TestSimpleScenario(t *testing.T) {
- // create storage to write final estimations
- resultStorage := newTestStorage()
-
- // create storages to accumulate announcements
- accumulatingStorageN2 := newTestStorage()
-
- // create storage of local metrics
- localStorageN1 := newTestStorage()
- localStorageN2 := newTestStorage()
-
- // create 2 controllers: 1st writes announcements to 2nd, 2nd directly to final destination
- ctrlN1 := loadcontroller.New(loadcontroller.Prm{
- LocalMetrics: localStorageN1,
- AnnouncementAccumulator: newTestStorage(),
- LocalAnnouncementTarget: &testAnnouncementStorage{
- w: accumulatingStorageN2,
- },
- ResultReceiver: resultStorage,
- })
-
- ctrlN2 := loadcontroller.New(loadcontroller.Prm{
- LocalMetrics: localStorageN2,
- AnnouncementAccumulator: accumulatingStorageN2,
- LocalAnnouncementTarget: &testAnnouncementStorage{
- w: resultStorage,
- },
- ResultReceiver: resultStorage,
- })
-
- const processEpoch uint64 = 10
-
- const goodNum = 4
-
- // create goodNum random announcements for the processing epoch
- announces := make([]container.SizeEstimation, 0, goodNum)
-
- for i := 0; i < goodNum; i++ {
- a := randAnnouncement()
- a.SetEpoch(processEpoch)
-
- announces = append(announces, a)
- }
-
- // store one half of "good" announcements to 1st metrics storage, another - to 2nd
- // and "bad" to both
- for i := 0; i < goodNum/2; i++ {
- require.NoError(t, localStorageN1.Put(announces[i]))
- }
-
- for i := goodNum / 2; i < goodNum; i++ {
- require.NoError(t, localStorageN2.Put(announces[i]))
- }
-
- wg := new(sync.WaitGroup)
- wg.Add(2)
-
- startPrm := loadcontroller.StartPrm{
- Epoch: processEpoch,
- }
-
- // start both controllers
- go func() {
- ctrlN1.Start(startPrm)
- wg.Done()
- }()
-
- go func() {
- ctrlN2.Start(startPrm)
- wg.Done()
- }()
-
- wg.Wait()
- wg.Add(2)
-
- stopPrm := loadcontroller.StopPrm{
- Epoch: processEpoch,
- }
-
- // stop both controllers
- go func() {
- ctrlN1.Stop(stopPrm)
- wg.Done()
- }()
-
- go func() {
- ctrlN2.Stop(stopPrm)
- wg.Done()
- }()
-
- wg.Wait()
-
- // the result target should contain all announcements
- var res []container.SizeEstimation
-
- err := resultStorage.Iterate(
- func(a container.SizeEstimation) bool {
- return true
- },
- func(a container.SizeEstimation) error {
- res = append(res, a)
- return nil
- },
- )
- require.NoError(t, err)
-
- for i := range announces {
- require.Contains(t, res, announces[i])
- }
-}
diff --git a/pkg/services/container/announcement/load/controller/controller.go b/pkg/services/container/announcement/load/controller/controller.go
deleted file mode 100644
index ef6dbade73..0000000000
--- a/pkg/services/container/announcement/load/controller/controller.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package loadcontroller
-
-import (
- "context"
- "fmt"
- "sync"
-)
-
-// Prm groups the required parameters of the Controller's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Iterator over the used space values of the containers
- // collected by the node locally.
- LocalMetrics IteratorProvider
-
- // Place of recording the local values of
- // the used space of containers.
- LocalAnnouncementTarget WriterProvider
-
- // Iterator over the summarized used space scores
- // from the various network participants.
- AnnouncementAccumulator IteratorProvider
-
- // Place of recording the final estimates of
- // the used space of containers.
- ResultReceiver WriterProvider
-}
-
-// Controller represents the main handler for starting
-// and interrupting container volume estimation.
-//
-// It binds the interfaces of the local value stores
-// to the target storage points. Controller is abstracted
-// from the internal storage device and the network location
-// of the connecting components. At its core, it is a
-// high-level start-stop trigger for calculations.
-//
-// For correct operation, the controller must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the constructor is immediately ready to work through
-// API of external control of calculations and data transfer.
-type Controller struct {
- prm Prm
-
- opts *options
-
- announceMtx sync.Mutex
- mAnnounceCtx map[uint64]context.CancelFunc
-
- reportMtx sync.Mutex
- mReportCtx map[uint64]context.CancelFunc
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Controller.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Controller does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Controller {
- switch {
- case prm.LocalMetrics == nil:
- panicOnPrmValue("LocalMetrics", prm.LocalMetrics)
- case prm.AnnouncementAccumulator == nil:
- panicOnPrmValue("AnnouncementAccumulator", prm.AnnouncementAccumulator)
- case prm.LocalAnnouncementTarget == nil:
- panicOnPrmValue("LocalAnnouncementTarget", prm.LocalAnnouncementTarget)
- case prm.ResultReceiver == nil:
- panicOnPrmValue("ResultReceiver", prm.ResultReceiver)
- }
-
- o := defaultOpts()
-
- for _, opt := range opts {
- opt(o)
- }
-
- return &Controller{
- prm: prm,
- opts: o,
- mAnnounceCtx: make(map[uint64]context.CancelFunc),
- mReportCtx: make(map[uint64]context.CancelFunc),
- }
-}
diff --git a/pkg/services/container/announcement/load/controller/deps.go b/pkg/services/container/announcement/load/controller/deps.go
deleted file mode 100644
index 953ea452bb..0000000000
--- a/pkg/services/container/announcement/load/controller/deps.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package loadcontroller
-
-import (
- "context"
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-)
-
-// UsedSpaceHandler describes the signature of the container.SizeEstimation
-// value handling function.
-//
-// Termination of processing without failures is usually signaled
-// with a zero error, while a specific value may describe the reason
-// for failure.
-type UsedSpaceHandler func(container.SizeEstimation) error
-
-// UsedSpaceFilter describes the signature of the function for
-// checking whether a value meets a certain criterion.
-//
-// Return of true means conformity, false - vice versa.
-type UsedSpaceFilter func(container.SizeEstimation) bool
-
-// Iterator is a group of methods provided by entity
-// which can iterate over a group of container.SizeEstimation values.
-type Iterator interface {
- // Iterate must start an iterator over values that
- // meet the filter criterion (returns true).
- // For each such value should call a handler, the error
- // of which should be directly returned from the method.
- //
- // Internal failures of the iterator are also signaled via
- // an error. After a successful call to the last value
- // handler, nil should be returned.
- Iterate(UsedSpaceFilter, UsedSpaceHandler) error
-}
-
-// IteratorProvider is a group of methods provided
-// by entity which generates iterators over
-// container.SizeEstimation values.
-type IteratorProvider interface {
- // InitIterator should return an initialized Iterator.
- //
- // Initialization problems are reported via error.
- // If no error was returned, then the Iterator must not be nil.
- //
- // Implementations can have different logic for different
- // contexts, so specific ones may document their own behavior.
- InitIterator(context.Context) (Iterator, error)
-}
-
-// Writer describes the interface for storing container.SizeEstimation values.
-//
-// This interface is provided by both local storage
-// of values and remote (wrappers over the RPC).
-type Writer interface {
- // Put performs a write operation of container.SizeEstimation value
- // and returns any error encountered.
- //
- // All values after the Close call must be flushed to the
- // physical target. Implementations can cache values before
- // Close operation.
- //
- // Put must not be called after Close.
- Put(container.SizeEstimation) error
-
- // Close exits with method-providing Writer.
- //
- // All cached values must be flushed before
- // the Close's return.
- //
- // Methods must not be called after Close.
- io.Closer
-}
-
-// WriterProvider is a group of methods provided
-// by entity which generates keepers of
-// container.SizeEstimation values.
-type WriterProvider interface {
- // InitWriter should return an initialized Writer.
- //
- // Initialization problems are reported via error.
- // If no error was returned, then the Writer must not be nil.
- //
- // Implementations can have different logic for different
- // contexts, so specific ones may document their own behavior.
- InitWriter(context.Context) (Writer, error)
-}
diff --git a/pkg/services/container/announcement/load/controller/opts.go b/pkg/services/container/announcement/load/controller/opts.go
deleted file mode 100644
index 29148def01..0000000000
--- a/pkg/services/container/announcement/load/controller/opts.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package loadcontroller
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Controller.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to specify logging component.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/container/announcement/load/controller/util.go b/pkg/services/container/announcement/load/controller/util.go
deleted file mode 100644
index fb356393d6..0000000000
--- a/pkg/services/container/announcement/load/controller/util.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package loadcontroller
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-)
-
-func usedSpaceFilterEpochEQ(epoch uint64) UsedSpaceFilter {
- return func(a container.SizeEstimation) bool {
- return a.Epoch() == epoch
- }
-}
-
-type storageWrapper struct {
- w Writer
- i Iterator
-}
-
-func (s storageWrapper) InitIterator(context.Context) (Iterator, error) {
- return s.i, nil
-}
-
-func (s storageWrapper) InitWriter(context.Context) (Writer, error) {
- return s.w, nil
-}
-
-func SimpleIteratorProvider(i Iterator) IteratorProvider {
- return &storageWrapper{
- i: i,
- }
-}
-
-func SimpleWriterProvider(w Writer) WriterProvider {
- return &storageWrapper{
- w: w,
- }
-}
diff --git a/pkg/services/container/announcement/load/route/calls.go b/pkg/services/container/announcement/load/route/calls.go
deleted file mode 100644
index 989120b402..0000000000
--- a/pkg/services/container/announcement/load/route/calls.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package loadroute
-
-import (
- "context"
- "encoding/hex"
- "sync"
-
- loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "go.uber.org/zap"
-)
-
-type routeContext struct {
- context.Context
-
- passedRoute []ServerInfo
-}
-
-// NewRouteContext wraps the main context of value passing with its traversal route.
-//
-// Passing the result to Router.InitWriter method will allow you to continue this route.
-func NewRouteContext(ctx context.Context, passed []ServerInfo) context.Context {
- return &routeContext{
- Context: ctx,
- passedRoute: passed,
- }
-}
-
-// InitWriter initializes and returns Writer that sends each value to its next route point.
-//
-// If ctx was created by NewRouteContext, then the traversed route is taken into account,
-// and the value will be sent to its continuation. Otherwise, the route will be laid
-// from scratch and the value will be sent to its primary point.
-//
-// After building a list of remote points of the next leg of the route, the value is sent
-// sequentially to all of them. If any transmissions (even all) fail, an error will not
-// be returned.
-//
-// Close of the composed Writer calls Close method on each internal Writer generated in
-// runtime and never returns an error.
-//
-// Always returns nil error.
-func (r *Router) InitWriter(ctx context.Context) (loadcontroller.Writer, error) {
- var (
- routeCtx *routeContext
- ok bool
- )
-
- if routeCtx, ok = ctx.(*routeContext); !ok {
- routeCtx = &routeContext{
- Context: ctx,
- passedRoute: []ServerInfo{r.localSrvInfo},
- }
- }
-
- return &loadWriter{
- router: r,
- ctx: routeCtx,
- mRoute: make(map[routeKey]*valuesRoute),
- mServers: make(map[string]loadcontroller.Writer),
- }, nil
-}
-
-type routeKey struct {
- epoch uint64
-
- cid string
-}
-
-type valuesRoute struct {
- route []ServerInfo
-
- values []container.SizeEstimation
-}
-
-type loadWriter struct {
- router *Router
-
- ctx *routeContext
-
- routeMtx sync.RWMutex
- mRoute map[routeKey]*valuesRoute
-
- mServers map[string]loadcontroller.Writer
-}
-
-func (w *loadWriter) Put(a container.SizeEstimation) error {
- w.routeMtx.Lock()
- defer w.routeMtx.Unlock()
-
- key := routeKey{
- epoch: a.Epoch(),
- cid: a.Container().EncodeToString(),
- }
-
- routeValues, ok := w.mRoute[key]
- if !ok {
- route, err := w.router.routeBuilder.NextStage(a, w.ctx.passedRoute)
- if err != nil {
- return err
- } else if len(route) == 0 {
- route = []ServerInfo{nil}
- }
-
- routeValues = &valuesRoute{
- route: route,
- values: []container.SizeEstimation{a},
- }
-
- w.mRoute[key] = routeValues
- }
-
- for _, remoteInfo := range routeValues.route {
- var key string
-
- if remoteInfo != nil {
- key = hex.EncodeToString(remoteInfo.PublicKey())
- }
-
- remoteWriter, ok := w.mServers[key]
- if !ok {
- provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
- if err != nil {
- w.router.log.Debug("could not initialize writer provider",
- zap.String("error", err.Error()),
- )
-
- continue // best effort
- }
-
- remoteWriter, err = provider.InitWriter(w.ctx)
- if err != nil {
- w.router.log.Debug("could not initialize writer",
- zap.String("error", err.Error()),
- )
-
- continue // best effort
- }
-
- w.mServers[key] = remoteWriter
- }
-
- err := remoteWriter.Put(a)
- if err != nil {
- w.router.log.Debug("could not put the value",
- zap.String("error", err.Error()),
- )
- }
-
- // continue best effort
- }
-
- return nil
-}
-
-func (w *loadWriter) Close() error {
- for key, wRemote := range w.mServers {
- err := wRemote.Close()
- if err != nil {
- w.router.log.Debug("could not close remote server writer",
- zap.String("key", key),
- zap.String("error", err.Error()),
- )
- }
- }
-
- return nil
-}
diff --git a/pkg/services/container/announcement/load/route/deps.go b/pkg/services/container/announcement/load/route/deps.go
deleted file mode 100644
index 429cda3eb1..0000000000
--- a/pkg/services/container/announcement/load/route/deps.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package loadroute
-
-import (
- loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-)
-
-// ServerInfo describes a set of
-// characteristics of a point in a route.
-type ServerInfo interface {
- // PublicKey returns public key of the node
- // from the route in a binary representation.
- PublicKey() []byte
-
- // Iterates over network addresses of the node
- // in the route. Breaks iterating on true return
- // of the handler.
- IterateAddresses(func(string) bool)
-
- // Returns number of server's network addresses.
- NumberOfAddresses() int
-
- // ExternalAddresses returns external node's addresses.
- ExternalAddresses() []string
-}
-
-// Builder groups methods to route values in the network.
-type Builder interface {
- // NextStage must return next group of route points for the value a
- // based on the passed route.
- //
- // Empty passed list means being at the starting point of the route.
- //
- // Must return empty list and no error if the endpoint of the route is reached.
- // If there are more than one point to go and the last passed point is included
- // in that list (means that point is the last point in one of the route groups),
- // returned route must contain nil point that should be interpreted as signal to,
- // among sending to other route points, save the announcement in that point.
- NextStage(a container.SizeEstimation, passed []ServerInfo) ([]ServerInfo, error)
-}
-
-// RemoteWriterProvider describes the component
-// for sending values to a fixed route point.
-type RemoteWriterProvider interface {
- // InitRemote must return WriterProvider to the route point
- // corresponding to info.
- //
- // Nil info matches the end of the route.
- InitRemote(info ServerInfo) (loadcontroller.WriterProvider, error)
-}
diff --git a/pkg/services/container/announcement/load/route/opts.go b/pkg/services/container/announcement/load/route/opts.go
deleted file mode 100644
index ab140ab4c7..0000000000
--- a/pkg/services/container/announcement/load/route/opts.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package loadroute
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Router.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns Option to specify logging component.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/container/announcement/load/route/placement/builder.go b/pkg/services/container/announcement/load/route/placement/builder.go
deleted file mode 100644
index 493b89723c..0000000000
--- a/pkg/services/container/announcement/load/route/placement/builder.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package placementrouter
-
-import "fmt"
-
-// Prm groups the required parameters of the Builder's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Calculator of the container members.
- //
- // Must not be nil.
- PlacementBuilder PlacementBuilder
-}
-
-// Builder represents component that routes used container space
-// values between nodes from the container.
-//
-// For correct operation, Builder must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Builder is immediately ready to work through API.
-type Builder struct {
- placementBuilder PlacementBuilder
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Builder.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Builder does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Builder {
- switch {
- case prm.PlacementBuilder == nil:
- panicOnPrmValue("PlacementBuilder", prm.PlacementBuilder)
- }
-
- return &Builder{
- placementBuilder: prm.PlacementBuilder,
- }
-}
diff --git a/pkg/services/container/announcement/load/route/placement/calls.go b/pkg/services/container/announcement/load/route/placement/calls.go
deleted file mode 100644
index 3db0d967c5..0000000000
--- a/pkg/services/container/announcement/load/route/placement/calls.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package placementrouter
-
-import (
- "bytes"
- "fmt"
-
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- loadroute "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/route"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-)
-
-// NextStage composes container nodes for the container and epoch from a,
-// and returns the list of nodes with maximum weight (one from each vector).
-//
-// If passed route has more than one point, then endpoint of the route is reached.
-//
-// The traversed route is not checked, it is assumed to be correct.
-func (b *Builder) NextStage(a container.SizeEstimation, passed []loadroute.ServerInfo) ([]loadroute.ServerInfo, error) {
- if len(passed) > 1 {
- return nil, nil
- }
-
- cnr := a.Container()
-
- placement, err := b.placementBuilder.BuildPlacement(a.Epoch(), cnr)
- if err != nil {
- return nil, fmt.Errorf("could not build placement %s: %w", cnr, err)
- }
-
- res := make([]loadroute.ServerInfo, 0, len(placement))
-
- for i := range placement {
- if len(placement[i]) == 0 {
- continue
- }
-
- if len(passed) == 1 && bytes.Equal(passed[0].PublicKey(), placement[i][0].PublicKey()) {
- // add nil element so the announcement will be saved in local memory
- res = append(res, nil)
- } else {
- // add element with remote node to send announcement to
- res = append(res, netmapcore.Node(placement[i][0]))
- }
- }
-
- return res, nil
-}
diff --git a/pkg/services/container/announcement/load/route/placement/deps.go b/pkg/services/container/announcement/load/route/placement/deps.go
deleted file mode 100644
index 43339eb47f..0000000000
--- a/pkg/services/container/announcement/load/route/placement/deps.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package placementrouter
-
-import (
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-// PlacementBuilder describes interface of FrostFS placement calculator.
-type PlacementBuilder interface {
- // BuildPlacement must compose and sort (according to a specific algorithm)
- // storage nodes from the container by its identifier using network map
- // of particular epoch.
- BuildPlacement(epoch uint64, cnr cid.ID) ([][]netmap.NodeInfo, error)
-}
diff --git a/pkg/services/container/announcement/load/route/router.go b/pkg/services/container/announcement/load/route/router.go
deleted file mode 100644
index 6169a2aeec..0000000000
--- a/pkg/services/container/announcement/load/route/router.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package loadroute
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// Prm groups the required parameters of the Router's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Characteristics of the local node's server.
- //
- // Must not be nil.
- LocalServerInfo ServerInfo
-
- // Component for sending values to a fixed route point.
- //
- // Must not be nil.
- RemoteWriterProvider RemoteWriterProvider
-
- // Route planner.
- //
- // Must not be nil.
- Builder Builder
-}
-
-// Router represents component responsible for routing
-// used container space values over the network.
-//
-// For each fixed pair (container ID, epoch) there is a
-// single value route on the network. Router provides the
-// interface for writing values to the next point of the route.
-//
-// For correct operation, Router must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Router is immediately ready to work through API.
-type Router struct {
- log *logger.Logger
-
- remoteProvider RemoteWriterProvider
-
- routeBuilder Builder
-
- localSrvInfo ServerInfo
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Router.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Router does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Router {
- switch {
- case prm.RemoteWriterProvider == nil:
- panicOnPrmValue("RemoteWriterProvider", prm.RemoteWriterProvider)
- case prm.Builder == nil:
- panicOnPrmValue("Builder", prm.Builder)
- case prm.LocalServerInfo == nil:
- panicOnPrmValue("LocalServerInfo", prm.LocalServerInfo)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &Router{
- log: o.log,
- remoteProvider: prm.RemoteWriterProvider,
- routeBuilder: prm.Builder,
- localSrvInfo: prm.LocalServerInfo,
- }
-}
diff --git a/pkg/services/container/announcement/load/route/util.go b/pkg/services/container/announcement/load/route/util.go
deleted file mode 100644
index fca1e57966..0000000000
--- a/pkg/services/container/announcement/load/route/util.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package loadroute
-
-import (
- "bytes"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-)
-
-var errWrongRoute = errors.New("wrong route")
-
-// CheckRoute checks if the route is a route correctly constructed by the builder for value a.
-//
-// Returns nil if route is correct, otherwise an error clarifying the inconsistency.
-func CheckRoute(builder Builder, a container.SizeEstimation, route []ServerInfo) error {
- for i := 1; i < len(route); i++ {
- servers, err := builder.NextStage(a, route[:i])
- if err != nil {
- return err
- } else if len(servers) == 0 {
- break
- }
-
- found := false
-
- for j := range servers {
- if servers[j] == nil {
- // nil route point means that
- // (i-1)-th node in the route
- // must, among other things,
- // save the announcement to its
- // local memory
- continue
- }
-
- if bytes.Equal(servers[j].PublicKey(), route[i].PublicKey()) {
- found = true
- break
- }
- }
-
- if !found {
- return errWrongRoute
- }
- }
-
- return nil
-}
diff --git a/pkg/services/container/announcement/load/storage/storage.go b/pkg/services/container/announcement/load/storage/storage.go
deleted file mode 100644
index 70d82fc4e3..0000000000
--- a/pkg/services/container/announcement/load/storage/storage.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package loadstorage
-
-import (
- "sort"
- "sync"
-
- loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-)
-
-type usedSpaceEstimations struct {
- announcement container.SizeEstimation
-
- sizes []uint64
-}
-
-type storageKey struct {
- epoch uint64
-
- cid string
-}
-
-// Storage represents in-memory storage of
-// container.SizeEstimation values.
-//
-// The write operation has the usual behavior - to save
-// the next number of used container space for a specific epoch.
-// All values related to one key (epoch, container ID) are stored
-// as a list.
-//
-// Storage also provides an iterator interface, into the handler
-// of which the final score is passed, built on all values saved
-// at the time of the call. Currently the only possible estimation
-// formula is used - the average between 10th and 90th percentile.
-//
-// For correct operation, Storage must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Storage is immediately ready to work through API.
-type Storage struct {
- mtx sync.RWMutex
-
- mItems map[storageKey]*usedSpaceEstimations
-}
-
-// Prm groups the required parameters of the Storage's constructor.
-//
-// The component is not parameterizable at the moment.
-type Prm struct{}
-
-// New creates a new instance of the Storage.
-//
-// The created Storage does not require additional
-// initialization and is completely ready for work.
-func New(_ Prm) *Storage {
- return &Storage{
- mItems: make(map[storageKey]*usedSpaceEstimations),
- }
-}
-
-// Put appends the next value of the occupied container space for the epoch
-// to the list of already saved values.
-//
-// Always returns nil error.
-func (s *Storage) Put(a container.SizeEstimation) error {
- s.mtx.Lock()
-
- {
- key := storageKey{
- epoch: a.Epoch(),
- cid: a.Container().EncodeToString(),
- }
-
- estimations, ok := s.mItems[key]
- if !ok {
- estimations = &usedSpaceEstimations{
- announcement: a,
- sizes: make([]uint64, 0, 1),
- }
-
- s.mItems[key] = estimations
- }
-
- estimations.sizes = append(estimations.sizes, a.Value())
- }
-
- s.mtx.Unlock()
-
- return nil
-}
-
-func (s *Storage) Close() error {
- return nil
-}
-
-// Iterate goes through all the lists with the key (container ID, epoch),
-// calculates the final grade for all values, and passes it to the handler.
-//
-// Final grade is the average between 10th and 90th percentiles.
-func (s *Storage) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontroller.UsedSpaceHandler) (err error) {
- s.mtx.RLock()
-
- {
- for _, v := range s.mItems {
- if f(v.announcement) {
- // calculate estimation based on 90th percentile
- v.announcement.SetValue(finalEstimation(v.sizes))
-
- if err = h(v.announcement); err != nil {
- break
- }
- }
- }
- }
-
- s.mtx.RUnlock()
-
- return
-}
-
-func finalEstimation(vals []uint64) uint64 {
- sort.Slice(vals, func(i, j int) bool {
- return vals[i] < vals[j]
- })
-
- const (
- lowerRank = 10
- upperRank = 90
- )
-
- if len(vals) >= lowerRank {
- lowerInd := percentile(lowerRank, vals)
- upperInd := percentile(upperRank, vals)
-
- vals = vals[lowerInd:upperInd]
- }
-
- sum := uint64(0)
-
- for i := range vals {
- sum += vals[i]
- }
-
- return sum / uint64(len(vals))
-}
-
-func percentile(rank int, vals []uint64) int {
- p := len(vals) * rank / 100
- return p
-}
diff --git a/pkg/services/container/announcement/load/storage/storage_test.go b/pkg/services/container/announcement/load/storage/storage_test.go
deleted file mode 100644
index 20e73627dc..0000000000
--- a/pkg/services/container/announcement/load/storage/storage_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package loadstorage
-
-import (
- "math/rand"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestStorage(t *testing.T) {
- const epoch uint64 = 13
-
- var a container.SizeEstimation
- a.SetContainer(cidtest.ID())
- a.SetEpoch(epoch)
-
- const opinionsNum = 100
-
- s := New(Prm{})
-
- opinions := make([]uint64, opinionsNum)
- for i := range opinions {
- opinions[i] = rand.Uint64()
-
- a.SetValue(opinions[i])
-
- require.NoError(t, s.Put(a))
- }
-
- iterCounter := 0
-
- err := s.Iterate(
- func(ai container.SizeEstimation) bool {
- return ai.Epoch() == epoch
- },
- func(ai container.SizeEstimation) error {
- iterCounter++
-
- require.Equal(t, epoch, ai.Epoch())
- require.Equal(t, a.Container(), ai.Container())
- require.Equal(t, finalEstimation(opinions), ai.Value())
-
- return nil
- },
- )
- require.NoError(t, err)
- require.Equal(t, 1, iterCounter)
-}
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
new file mode 100644
index 0000000000..3b5dab9aa7
--- /dev/null
+++ b/pkg/services/container/ape.go
@@ -0,0 +1,756 @@
+package container
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+
+ aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "google.golang.org/grpc/peer"
+)
+
+var (
+ errMissingContainerID = errors.New("missing container ID")
+ errSessionContainerMismatch = errors.New("requested container is not related to the session")
+ errMissingVerificationHeader = errors.New("malformed request: empty verification header")
+ errInvalidSessionTokenSignature = errors.New("malformed request: invalid session token signature")
+ errInvalidSessionTokenOwner = errors.New("malformed request: invalid session token owner")
+ errEmptyBodySignature = errors.New("malformed request: empty body signature")
+ errMissingOwnerID = errors.New("malformed request: missing owner ID")
+ errOwnerIDIsNotSet = errors.New("owner id is not set")
+ errInvalidDomainZone = errors.New("invalid domain zone: no namespace is expected")
+
+ undefinedContainerID = cid.ID{}
+)
+
+type ir interface {
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
+}
+
+type containers interface {
+ Get(context.Context, cid.ID) (*containercore.Container, error)
+}
+
+type apeChecker struct {
+ router policyengine.ChainRouter
+ reader containers
+ ir ir
+ nm netmap.Source
+
+ frostFSIDClient frostfsidcore.SubjectProvider
+
+ next Server
+}
+
+func NewAPEServer(router policyengine.ChainRouter, reader containers, ir ir, nm netmap.Source, frostFSIDClient frostfsidcore.SubjectProvider, srv Server) Server {
+ return &apeChecker{
+ router: router,
+ reader: reader,
+ ir: ir,
+ next: srv,
+ nm: nm,
+ frostFSIDClient: frostFSIDClient,
+ }
+}
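+
+// NewAPEServer is a middleware-style decorator: every handler below runs the
+// APE check first and calls the wrapped Server only when the check passes. A
+// minimal assembly sketch (the variable names are illustrative, not part of
+// this patch):
+//
+//  var next Server // e.g. the executing service next in the pipeline
+//  srv := NewAPEServer(router, cnrReader, irFetcher, netmapSource, frostfsIDClient, next)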
+
+func (ac *apeChecker) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Delete")
+ defer span.End()
+
+ if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
+ nativeschema.MethodDeleteContainer); err != nil {
+ return nil, err
+ }
+
+ return ac.next.Delete(ctx, req)
+}
+
+func (ac *apeChecker) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Get")
+ defer span.End()
+
+ if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
+ nativeschema.MethodGetContainer); err != nil {
+ return nil, err
+ }
+
+ return ac.next.Get(ctx, req)
+}
+
+func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
+ defer span.End()
+
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
+ nativeschema.PropertyKeyActorRole: role,
+ }
+
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+
+ namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
+ if err != nil {
+ return nil, fmt.Errorf("could not get owner namespace: %w", err)
+ }
+ if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
+ return nil, err
+ }
+
+ request := aperequest.NewRequest(
+ nativeschema.MethodListContainers,
+ aperequest.NewResource(
+ resourceName(namespace, ""),
+ make(map[string]string),
+ ),
+ reqProps,
+ )
+
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // The Policy contract keeps group-related chains as namespace-group pairs.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetWithNamespace(namespace)
+ rt.User = &policyengine.Target{
+ Type: policyengine.User,
+ Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
+ }
+ rt.Groups = make([]policyengine.Target, len(groups))
+ for i := range groups {
+ rt.Groups[i] = policyengine.GroupTarget(groups[i])
+ }
+
+ s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
+ if err != nil {
+ return nil, err
+ }
+
+ if found && s == apechain.Allow {
+ return ac.next.List(ctx, req)
+ }
+
+ return nil, apeErr(nativeschema.MethodListContainers, s)
+}
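+
+// Note: the decision above is deny-by-default. The request is forwarded only
+// when a matching rule was found AND its status is Allow; both "no rule found"
+// and any explicit non-Allow status end up in apeErr.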
+
+func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream")
+ defer span.End()
+
+ role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ if err != nil {
+ return err
+ }
+
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
+ nativeschema.PropertyKeyActorRole: role,
+ }
+
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ if err != nil {
+ return err
+ }
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+
+ namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
+ if err != nil {
+ return fmt.Errorf("could not get owner namespace: %w", err)
+ }
+ if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
+ return err
+ }
+
+ request := aperequest.NewRequest(
+ nativeschema.MethodListContainers,
+ aperequest.NewResource(
+ resourceName(namespace, ""),
+ make(map[string]string),
+ ),
+ reqProps,
+ )
+
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ if err != nil {
+ return fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // The Policy contract keeps group-related chains as namespace-group pairs.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetWithNamespace(namespace)
+ rt.User = &policyengine.Target{
+ Type: policyengine.User,
+ Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
+ }
+ rt.Groups = make([]policyengine.Target, len(groups))
+ for i := range groups {
+ rt.Groups[i] = policyengine.GroupTarget(groups[i])
+ }
+
+ s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
+ if err != nil {
+ return err
+ }
+
+ if found && s == apechain.Allow {
+ return ac.next.ListStream(req, stream)
+ }
+
+ return apeErr(nativeschema.MethodListContainers, s)
+}
+
+func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put")
+ defer span.End()
+
+ role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
+ nativeschema.PropertyKeyActorRole: role,
+ }
+
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+
+ namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID())
+ if err != nil {
+ return nil, fmt.Errorf("get namespace error: %w", err)
+ }
+ if err = validateNamespace(req.GetBody().GetContainer(), namespace); err != nil {
+ return nil, err
+ }
+
+ cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer())
+ if err != nil {
+ return nil, fmt.Errorf("get container properties: %w", err)
+ }
+
+ request := aperequest.NewRequest(
+ nativeschema.MethodPutContainer,
+ aperequest.NewResource(
+ resourceName(namespace, ""),
+ cnrProps,
+ ),
+ reqProps,
+ )
+
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // The Policy contract keeps group-related chains as namespace-group pairs.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetWithNamespace(namespace)
+ rt.User = &policyengine.Target{
+ Type: policyengine.User,
+ Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
+ }
+ rt.Groups = make([]policyengine.Target, len(groups))
+ for i := range groups {
+ rt.Groups[i] = policyengine.GroupTarget(groups[i])
+ }
+
+ s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
+ if err != nil {
+ return nil, err
+ }
+
+ if found && s == apechain.Allow {
+ return ac.next.Put(ctx, req)
+ }
+
+ return nil, apeErr(nativeschema.MethodPutContainer, s)
+}
+
+func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
+ if vh == nil {
+ return "", nil, errMissingVerificationHeader
+ }
+
+ if oID == nil {
+ return "", nil, errMissingOwnerID
+ }
+ var ownerID user.ID
+ if err := ownerID.ReadFromV2(*oID); err != nil {
+ return "", nil, err
+ }
+
+ actor, pk, err := ac.getActorAndPublicKey(mh, vh, undefinedContainerID)
+ if err != nil {
+ return "", nil, err
+ }
+
+ if actor.Equals(ownerID) {
+ return nativeschema.PropertyValueContainerRoleOwner, pk, nil
+ }
+
+ pkBytes := pk.Bytes()
+ isIR, err := ac.isInnerRingKey(ctx, pkBytes)
+ if err != nil {
+ return "", nil, err
+ }
+ if isIR {
+ return nativeschema.PropertyValueContainerRoleIR, pk, nil
+ }
+
+ return nativeschema.PropertyValueContainerRoleOthers, pk, nil
+}
+
+func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, containerID *refs.ContainerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, op string) error {
+ if vh == nil {
+ return errMissingVerificationHeader
+ }
+
+ id, err := getContainerID(containerID)
+ if err != nil {
+ return err
+ }
+
+ cont, err := ac.reader.Get(ctx, id)
+ if err != nil {
+ return err
+ }
+
+ reqProps, pk, err := ac.getRequestProps(ctx, mh, vh, cont, id)
+ if err != nil {
+ return err
+ }
+
+ namespace := ""
+ cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cont.Value).Zone(), ".ns")
+ if hasNamespace {
+ namespace = cntNamespace
+ }
+
+ groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
+ if err != nil {
+ return fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // The Policy contract keeps group-related chains as namespace-group pairs.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
+ }
+
+ request := aperequest.NewRequest(
+ op,
+ aperequest.NewResource(
+ resourceName(namespace, id.EncodeToString()),
+ getContainerProps(cont),
+ ),
+ reqProps,
+ )
+
+ s, found, err := ac.router.IsAllowed(apechain.Ingress,
+ policyengine.NewRequestTargetExtended(namespace, id.EncodeToString(), fmt.Sprintf("%s:%s", namespace, pk.Address()), groups),
+ request)
+ if err != nil {
+ return err
+ }
+
+ if found && s == apechain.Allow {
+ return nil
+ }
+
+ return apeErr(op, s)
+}
+
+func apeErr(operation string, status apechain.Status) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(fmt.Sprintf("access to container operation %s is denied by access policy engine: %s", operation, status.String()))
+ return errAccessDenied
+}
+
+func getContainerID(reqContID *refs.ContainerID) (cid.ID, error) {
+ if reqContID == nil {
+ return cid.ID{}, errMissingContainerID
+ }
+ var id cid.ID
+ err := id.ReadFromV2(*reqContID)
+ if err != nil {
+ return cid.ID{}, fmt.Errorf("invalid container ID: %w", err)
+ }
+ return id, nil
+}
+
+func resourceName(namespace string, container string) string {
+ if namespace == "" && container == "" {
+ return nativeschema.ResourceFormatRootContainers
+ }
+ if namespace == "" && container != "" {
+ return fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)
+ }
+ if namespace != "" && container == "" {
+ return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, namespace)
+ }
+ return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
+}
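+
+// For illustration (the exact patterns are defined by the policy-engine native
+// schema, so treat these shapes as assumptions):
+//
+//  resourceName("", "")           // all root containers
+//  resourceName("", "<cid>")      // a single root container
+//  resourceName("ns1", "")       // all containers of namespace ns1
+//  resourceName("ns1", "<cid>")  // a single container of namespace ns1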
+
+func getContainerProps(c *containercore.Container) map[string]string {
+ props := map[string]string{
+ nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
+ }
+ for attrName, attrVal := range c.Value.Attributes() {
+ name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName)
+ props[name] = attrVal
+ }
+ return props
+}
+
+func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) {
+ if cnrV2 == nil {
+ return nil, errors.New("container is not set")
+ }
+ c := cnrSDK.Container{}
+ if err := c.ReadFromV2(*cnrV2); err != nil {
+ return nil, err
+ }
+ return getContainerProps(&containercore.Container{Value: c}), nil
+}
+
+func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
+ cont *containercore.Container, cnrID cid.ID,
+) (map[string]string, *keys.PublicKey, error) {
+ actor, pk, err := ac.getActorAndPublicKey(mh, vh, cnrID)
+ if err != nil {
+ return nil, nil, err
+ }
+ role, err := ac.getRole(ctx, actor, pk, cont, cnrID)
+ if err != nil {
+ return nil, nil, err
+ }
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
+ nativeschema.PropertyKeyActorRole: role,
+ }
+ reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
+ if err != nil {
+ return nil, nil, err
+ }
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+ return reqProps, pk, nil
+}
+
+func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
+ if cont.Value.Owner().Equals(*actor) {
+ return nativeschema.PropertyValueContainerRoleOwner, nil
+ }
+
+ pkBytes := pk.Bytes()
+ isIR, err := ac.isInnerRingKey(ctx, pkBytes)
+ if err != nil {
+ return "", err
+ }
+ if isIR {
+ return nativeschema.PropertyValueContainerRoleIR, nil
+ }
+
+ isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont)
+ if err != nil {
+ return "", err
+ }
+ if isContainer {
+ return nativeschema.PropertyValueContainerRoleContainer, nil
+ }
+
+ return nativeschema.PropertyValueContainerRoleOthers, nil
+}
+
+func (ac *apeChecker) getActorAndPublicKey(mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) {
+ st, err := ac.getSessionToken(mh)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if st != nil {
+ return ac.getActorAndPKFromSessionToken(st, cnrID)
+ }
+ return ac.getActorAndPKFromSignature(vh)
+}
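+
+// Resolution order: if the innermost meta header carries a session token, the
+// actor is the verified token issuer; otherwise the actor is derived from the
+// public key of the origin body signature (see the two helpers below).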
+
+func (ac *apeChecker) getActorAndPKFromSignature(vh *session.RequestVerificationHeader) (*user.ID, *keys.PublicKey, error) {
+ for vh.GetOrigin() != nil {
+ vh = vh.GetOrigin()
+ }
+ sig := vh.GetBodySignature()
+ if sig == nil {
+ return nil, nil, errEmptyBodySignature
+ }
+ key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var userID user.ID
+ user.IDFromKey(&userID, (ecdsa.PublicKey)(*key))
+
+ return &userID, key, nil
+}
+
+func (ac *apeChecker) getSessionToken(mh *session.RequestMetaHeader) (*sessionSDK.Container, error) {
+ for mh.GetOrigin() != nil {
+ mh = mh.GetOrigin()
+ }
+ st := mh.GetSessionToken()
+ if st == nil {
+ return nil, nil
+ }
+
+ var tok sessionSDK.Container
+ err := tok.ReadFromV2(*st)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ return &tok, nil
+}
+
+func (ac *apeChecker) getActorAndPKFromSessionToken(st *sessionSDK.Container, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) {
+ if cnrID != undefinedContainerID && !st.AppliedTo(cnrID) {
+ return nil, nil, errSessionContainerMismatch
+ }
+ if !st.VerifySignature() {
+ return nil, nil, errInvalidSessionTokenSignature
+ }
+ var tok session.Token
+ st.WriteToV2(&tok)
+
+ signaturePublicKey, err := keys.NewPublicKeyFromBytes(tok.GetSignature().GetKey(), elliptic.P256())
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid key in session token signature: %w", err)
+ }
+
+ tokenIssuer := st.Issuer()
+ if !isOwnerFromKey(tokenIssuer, signaturePublicKey) {
+ return nil, nil, errInvalidSessionTokenOwner
+ }
+
+ return &tokenIssuer, signaturePublicKey, nil
+}
+
+func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
+ if key == nil {
+ return false
+ }
+
+ var id2 user.ID
+ user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
+
+ return id2.Equals(id)
+}
+
+func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) {
+ innerRingKeys, err := ac.ir.InnerRingKeys(ctx)
+ if err != nil {
+ return false, err
+ }
+
+ for i := range innerRingKeys {
+ if bytes.Equal(innerRingKeys[i], pk) {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
+ binCnrID := make([]byte, sha256.Size)
+ cnrID.Encode(binCnrID)
+
+ nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm)
+ if err != nil {
+ return false, err
+ }
+
+ if isContainerNode(nm, pk, binCnrID, cont) {
+ return true, nil
+ }
+
+ // Then check the previous netmap: this can happen in between epoch changes,
+ // when a node migrates data from a container of the last epoch.
+ nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm)
+ if err != nil {
+ return false, err
+ }
+
+ return isContainerNode(nm, pk, binCnrID, cont), nil
+}
+
+func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
+ // ContainerNodes could return an error only if the network map doesn't have
+ // enough nodes to fulfill the policy. That is a logical error which doesn't
+ // affect actor role determination, so we ignore it.
+ cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
+
+ for i := range cnrVectors {
+ for j := range cnrVectors[i] {
+ if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
+ var ownerSDK user.ID
+ if owner == nil {
+ return "", errOwnerIDIsNotSet
+ }
+ if err := ownerSDK.ReadFromV2(*owner); err != nil {
+ return "", err
+ }
+ addr := ownerSDK.ScriptHash()
+
+ namespace := ""
+ subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
+ if err == nil {
+ namespace = subject.Namespace
+ } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
+ return "", fmt.Errorf("get subject error: %w", err)
+ }
+ return namespace, nil
+}
+
+func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
+ var ownerSDK user.ID
+ if owner == nil {
+ return "", errOwnerIDIsNotSet
+ }
+ if err := ownerSDK.ReadFromV2(*owner); err != nil {
+ return "", err
+ }
+ addr := ownerSDK.ScriptHash()
+ subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
+ if err != nil {
+ return "", fmt.Errorf("get subject error: %w", err)
+ }
+ return subject.Namespace, nil
+}
+
+// validateNamespace validates the namespace set in a container.
+// If the frostfs-id contract stores namespace N1 for the owner ID while the container in the
+// request is set with namespace N2 (via its Zone() property), then N2 is invalid and the request is denied.
+func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) error {
+ if cnrV2 == nil {
+ return nil
+ }
+ var cnr cnrSDK.Container
+ if err := cnr.ReadFromV2(*cnrV2); err != nil {
+ return err
+ }
+ cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr).Zone(), ".ns")
+ if hasNamespace {
+ if cntNamespace != ownerIDNamespace {
+ if ownerIDNamespace == "" {
+ return errInvalidDomainZone
+ }
+ return fmt.Errorf("invalid domain zone: expected namespace %s, but got %s", ownerIDNamespace, cntNamespace)
+ }
+ } else if ownerIDNamespace != "" {
+ return fmt.Errorf("invalid domain zone: expected namespace %s, but got invalid or empty", ownerIDNamespace)
+ }
+ return nil
+}
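+
+// Example of the rule above (illustrative values): with owner namespace "ns1",
+// a container whose domain zone is "ns1.ns" passes; zone "ns2.ns" is rejected
+// with a mismatch error; a zone without the ".ns" suffix is rejected because
+// namespace "ns1" is expected; with an empty owner namespace, only a zone that
+// carries no namespace is accepted.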
+
+// validateNamespaceByPublicKey checks that the namespace of the request actor matches the owner's namespace.
+// The actor's namespace is resolved from the actor's public key.
+func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error {
+ var actor user.ID
+ user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
+ actorOwnerID := new(refs.OwnerID)
+ actor.WriteToV2(actorOwnerID)
+ actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID)
+ if err != nil {
+ return fmt.Errorf("could not get actor namespace: %w", err)
+ }
+ if actorNamespace != ownerIDNamespace {
+ return fmt.Errorf("actor namespace %s differs from owner: %s", actorNamespace, ownerIDNamespace)
+ }
+ return nil
+}
+
+// fillWithUserClaimTags fills the APE request properties with user claim tags fetched from the frostfsid contract by the actor's public key.
+func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
+ if reqProps == nil {
+ reqProps = make(map[string]string)
+ }
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk)
+ if err != nil {
+ return reqProps, err
+ }
+ for propertyName, propertyValue := range props {
+ reqProps[propertyName] = propertyValue
+ }
+ return reqProps, nil
+}
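+
+// Note: contract-provided properties are merged into reqProps as-is, so a user
+// claim tag with the same key as an already-set property overwrites it.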
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
new file mode 100644
index 0000000000..6438c34ca9
--- /dev/null
+++ b/pkg/services/container/ape_test.go
@@ -0,0 +1,1767 @@
+package container
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/peer"
+)
+
+const (
+ testDomainName = "testdomainname"
+ testDomainZone = "testdomainname.ns"
+)
+
+func TestAPE(t *testing.T) {
+ t.Parallel()
+ t.Run("allow then deny get container", testAllowThenDenyGetContainerRuleDefined)
+ t.Run("allow by group id", TestAllowByGroupIDs)
+ t.Run("deny get container no rule found", testDenyGetContainerNoRuleFound)
+ t.Run("deny get container for others", testDenyGetContainerForOthers)
+ t.Run("deny get container by user claim tag", testDenyGetContainerByUserClaimTag)
+ t.Run("deny get container by IP", testDenyGetContainerByIP)
+ t.Run("deny get container by group id", testDenyGetContainerByGroupID)
+ t.Run("deny put container for others with session token", testDenyPutContainerForOthersSessionToken)
+ t.Run("deny put container, read namespace from frostfsID", testDenyPutContainerReadNamespaceFromFrostfsID)
+ t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
+ t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
+ t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
+ t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr)
+ t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr)
+}
+
+const (
+ incomingIP = "192.92.33.1"
+)
+
+func ctxWithPeerInfo() context.Context {
+ return peer.NewContext(context.Background(), &peer.Peer{
+ Addr: &net.TCPAddr{
+ IP: net.ParseIP(incomingIP),
+ Port: 41111,
+ },
+ })
+}
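+
+// ctxWithPeerInfo mimics an inbound gRPC connection so that handlers reading
+// the source address via peer.FromContext observe incomingIP. Tests pass it in
+// place of context.Background() to exercise IP-based chain conditions, e.g.:
+//
+//  resp, err := apeSrv.Get(ctxWithPeerInfo(), req)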
+
+func testAllowThenDenyGetContainerRuleDefined(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{},
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ addDefaultAllowGetPolicy(t, router, contID)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ _, err = apeSrv.Get(context.Background(), req)
+ require.NoError(t, err)
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ resp, err := apeSrv.Get(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
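+
+// Note for the test above: the default allow policy and the container-target
+// deny chain both match the second Get; the request is rejected because an
+// explicit AccessDenied status takes precedence over Allow.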
+
+func testAllowByGroupIDs(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 1,
+ Name: "Group#1",
+ },
+ },
+ },
+ },
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.GroupTarget(":1"), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: commonschema.PropertyKeyFrostFSIDGroupID,
+ Value: "1",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ resp, err := apeSrv.Get(context.Background(), req)
+ require.NotNil(t, resp)
+ require.NoError(t, err)
+}
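+
+// The test above attaches the chain to a group target (engine.GroupTarget(":1"),
+// i.e. an empty namespace and group ID 1) and relies on the checker expanding
+// the actor's frostfs-id group membership into the namespace-qualified group
+// name during target resolution.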
+
+func testDenyGetContainerNoRuleFound(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{},
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.NoRuleFound.String())
+}
+
+func testDenyGetContainerForOthers(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{},
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+}
+
+func testDenyGetContainerByUserClaimTag(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr1"),
+ Value: "value100",
+ Op: chain.CondStringNotEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+}
+
+func testDenyGetContainerByIP(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: commonschema.PropertyKeyFrostFSSourceIP,
+ Value: incomingIP + "/16",
+ Op: chain.CondIPAddress,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
+func testDenyGetContainerSysZoneAttr(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindResource,
+ Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+ Value: "eggplant",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
+func testDenyPutContainerSysZoneAttr(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+ owner := testContainer.Owner()
+ ownerAddr := owner.ScriptHash()
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ ownerAddr: {},
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ ownerAddr: {},
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodPutContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ nativeschema.ResourceFormatRootContainers,
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindResource,
+ Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+ Value: "eggplant",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initPutRequest(t, testContainer)
+
+ resp, err := apeSrv.Put(ctxWithPeerInfo(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+ require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
+func testDenyGetContainerByGroupID(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ pk.PublicKey().GetScriptHash(): {
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(nm.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ nm.netmaps[nm.currentEpoch] = &testNetmap
+ nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: commonschema.PropertyKeyFrostFSIDGroupID,
+ Value: "19888",
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.Get(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+}
+
+func testDenyPutContainerForOthersSessionToken(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ testContainer := containertest.Container()
+ owner := testContainer.Owner()
+ ownerAddr := owner.ScriptHash()
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ ownerAddr: {},
+ },
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodPutContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ nativeschema.ResourceFormatRootContainers,
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initPutRequest(t, testContainer)
+
+ resp, err := apeSrv.Put(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+}
+
+func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ cnrID, testContainer := initTestContainer(t, true)
+ contRdr.c[cnrID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodPutContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initPutRequest(t, testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(testContainer)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ ownerScriptHash: {
+ Namespace: testDomainName,
+ Name: testDomainName,
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ ownerScriptHash: {
+ Namespace: testDomainName,
+ Name: testDomainName,
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+ resp, err := apeSrv.Put(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+}
+
+func testDenyPutContainerInvalidNamespace(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ cnrID, testContainer := initTestContainer(t, false)
+ var domain cnrSDK.Domain
+ domain.SetName("incorrect" + testDomainName)
+ domain.SetZone("incorrect" + testDomainZone)
+ cnrSDK.WriteDomain(&testContainer, domain)
+ contRdr.c[cnrID] = &containercore.Container{Value: testContainer}
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+
+ _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodPutContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initPutRequest(t, testContainer)
+ ownerScriptHash := initOwnerIDScriptHash(testContainer)
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ ownerScriptHash: {
+ Namespace: testDomainName,
+ Name: testDomainName,
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ ownerScriptHash: {
+ Namespace: testDomainName,
+ Name: testDomainName,
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+ resp, err := apeSrv.Put(context.Background(), req)
+ require.Nil(t, resp)
+ require.ErrorContains(t, err, "invalid domain zone")
+}
+
+func testDenyListContainersForPK(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{},
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodListContainers,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ nativeschema.ResourceFormatRootContainers,
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(pk.PublicKey().Bytes()),
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ var userID user.ID
+ user.IDFromKey(&userID, pk.PrivateKey.PublicKey)
+
+ req := &container.ListRequest{}
+ req.SetBody(&container.ListRequestBody{})
+ var ownerID refs.OwnerID
+ userID.WriteToV2(&ownerID)
+ req.GetBody().SetOwnerID(&ownerID)
+
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ resp, err := apeSrv.List(context.Background(), req)
+ require.Nil(t, resp)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ require.ErrorAs(t, err, &errAccessDenied)
+}
+
+func testDenyListContainersValidationNamespaceError(t *testing.T) {
+ t.Parallel()
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+ router := inmemory.NewInMemory()
+ contRdr := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+ nm := &netmapStub{}
+
+ actorPK, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ ownerPK, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ actorScriptHash, ownerScriptHash := initActorOwnerScriptHashes(t, actorPK, ownerPK)
+
+ const actorDomain = "actor" + testDomainName
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ actorScriptHash: {
+ Namespace: actorDomain,
+ Name: actorDomain,
+ },
+ ownerScriptHash: {
+ Namespace: testDomainName,
+ Name: testDomainName,
+ },
+ },
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{
+ actorScriptHash: {
+ Namespace: actorDomain,
+ Name: actorDomain,
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19777,
+ },
+ },
+ },
+ ownerScriptHash: {
+ Namespace: testDomainName,
+ Name: testDomainName,
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 19888,
+ },
+ },
+ },
+ },
+ }
+
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+ nm.currentEpoch = 100
+ nm.netmaps = map[uint64]*netmap.NetMap{}
+
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodListContainers,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ nativeschema.ResourceFormatRootContainers,
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorPublicKey,
+ Value: actorPK.PublicKey().String(),
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initListRequest(t, actorPK, ownerPK)
+
+ resp, err := apeSrv.List(context.Background(), req)
+ require.Nil(t, resp)
+ require.ErrorContains(t, err, "actor namespace "+actorDomain+" differs")
+}
+
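+// srvStub is a stub of the next-in-chain Server: it counts calls per method
+// and returns empty responses.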
+type srvStub struct {
+ calls map[string]int
+}
+
+func (s *srvStub) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) {
+ s.calls["Delete"]++
+ return &container.DeleteResponse{}, nil
+}
+
+func (s *srvStub) Get(context.Context, *container.GetRequest) (*container.GetResponse, error) {
+ s.calls["Get"]++
+ return &container.GetResponse{}, nil
+}
+
+func (s *srvStub) List(context.Context, *container.ListRequest) (*container.ListResponse, error) {
+ s.calls["List"]++
+ return &container.ListResponse{}, nil
+}
+
+func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error {
+ s.calls["ListStream"]++
+ return nil
+}
+
+func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) {
+ s.calls["Put"]++
+ return &container.PutResponse{}, nil
+}
+
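+// irStub returns a fixed set of Inner Ring keys.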
+type irStub struct {
+ keys [][]byte
+}
+
+func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
+ return s.keys, nil
+}
+
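+// containerStub reads containers from an in-memory map.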
+type containerStub struct {
+ c map[cid.ID]*containercore.Container
+}
+
+func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) {
+ if v, ok := s.c[id]; ok {
+ return v, nil
+ }
+ return nil, errors.New("container not found")
+}
+
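+// netmapStub serves netmaps from an in-memory map keyed by epoch.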
+type netmapStub struct {
+ netmaps map[uint64]*netmap.NetMap
+ currentEpoch uint64
+}
+
+func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+ if diff >= s.currentEpoch {
+ return nil, errors.New("invalid diff")
+ }
+ return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+}
+
+func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+ if nm, found := s.netmaps[epoch]; found {
+ return nm, nil
+ }
+ return nil, errors.New("netmap not found")
+}
+
+func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
+ return s.currentEpoch, nil
+}
+
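+// frostfsidStub resolves FrostFS ID subjects from in-memory maps.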
+type frostfsidStub struct {
+ subjects map[util.Uint160]*client.Subject
+ subjectsExt map[util.Uint160]*client.SubjectExtended
+}
+
+func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) {
+ s, ok := f.subjects[owner]
+ if !ok {
+		return nil, errors.New(frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return s, nil
+}
+
+func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) {
+ s, ok := f.subjectsExt[owner]
+ if !ok {
+		return nil, errors.New(frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return s, nil
+}
+
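+// testAPEServer bundles the APE checker under test together with all of its
+// stubbed dependencies.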
+type testAPEServer struct {
+ engine engine.Engine
+
+ containerReader *containerStub
+
+ ir *irStub
+
+ netmap *netmapStub
+
+ frostfsIDSubjectReader *frostfsidStub
+
+ apeChecker *apeChecker
+}
+
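+// newTestAPEServer wires an apeChecker to fresh in-memory stubs so each test
+// can mutate them independently.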
+func newTestAPEServer() testAPEServer {
+ srv := &srvStub{
+ calls: map[string]int{},
+ }
+
+ engine := inmemory.NewInMemory()
+
+ containerReader := &containerStub{
+ c: map[cid.ID]*containercore.Container{},
+ }
+
+ ir := &irStub{
+ keys: [][]byte{},
+ }
+
+ netmap := &netmapStub{}
+
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{},
+ subjectsExt: map[util.Uint160]*client.SubjectExtended{},
+ }
+
+ apeChecker := &apeChecker{
+ router: engine,
+ reader: containerReader,
+ ir: ir,
+ nm: netmap,
+ frostFSIDClient: frostfsIDSubjectReader,
+ next: srv,
+ }
+
+ return testAPEServer{
+ engine: engine,
+ containerReader: containerReader,
+ ir: ir,
+ netmap: netmap,
+ frostfsIDSubjectReader: frostfsIDSubjectReader,
+ apeChecker: apeChecker,
+ }
+}
+
+func TestValidateContainerBoundedOperation(t *testing.T) {
+ t.Parallel()
+
+ t.Run("check root-defined container in root-defined container target rule", func(t *testing.T) {
+ t.Parallel()
+
+ components := newTestAPEServer()
+ contID, testContainer := initTestContainer(t, false)
+ components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
+ initTestNetmap(components.netmap)
+
+ _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initTestGetContainerRequest(t, contID)
+
+ err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
+ aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied)
+ require.ErrorContains(t, err, aErr.Error())
+ })
+
+ t.Run("check root-defined container in testdomain-defined container target rule", func(t *testing.T) {
+ t.Parallel()
+
+ components := newTestAPEServer()
+ contID, testContainer := initTestContainer(t, false)
+ components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
+ initTestNetmap(components.netmap)
+
+ _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, testDomainName, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ addDefaultAllowGetPolicy(t, components.engine, contID)
+
+ req := initTestGetContainerRequest(t, contID)
+
+ err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
+ require.NoError(t, err)
+ })
+
+ t.Run("check root-defined container in testdomain namespace target rule", func(t *testing.T) {
+ t.Parallel()
+
+ components := newTestAPEServer()
+ contID, testContainer := initTestContainer(t, false)
+ components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
+ initTestNetmap(components.netmap)
+
+ _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ addDefaultAllowGetPolicy(t, components.engine, contID)
+
+ req := initTestGetContainerRequest(t, contID)
+
+ err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
+ require.NoError(t, err)
+ })
+
+ t.Run("check testdomain-defined container in root-defined container target rule", func(t *testing.T) {
+ t.Parallel()
+
+ components := newTestAPEServer()
+ contID, testContainer := initTestContainer(t, true)
+ components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
+ initTestNetmap(components.netmap)
+
+ _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ addDefaultAllowGetPolicy(t, components.engine, contID)
+
+ req := initTestGetContainerRequest(t, contID)
+
+ err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
+ require.NoError(t, err)
+ })
+
+ t.Run("check testdomain-defined container in testdomain-defined container target rule", func(t *testing.T) {
+ t.Parallel()
+
+ components := newTestAPEServer()
+ contID, testContainer := initTestContainer(t, true)
+ components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
+ initTestNetmap(components.netmap)
+
+ _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, testDomainName, contID.EncodeToString()),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ addDefaultAllowGetPolicy(t, components.engine, contID)
+
+ req := initTestGetContainerRequest(t, contID)
+
+ err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
+ aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied)
+ require.ErrorContains(t, err, aErr.Error())
+ })
+
+ t.Run("check testdomain-defined container in testdomain namespace target rule", func(t *testing.T) {
+ t.Parallel()
+
+ components := newTestAPEServer()
+ contID, testContainer := initTestContainer(t, true)
+ components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
+ initTestNetmap(components.netmap)
+
+ _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
+ },
+ },
+ Condition: []chain.Condition{
+ {
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ Op: chain.CondStringEquals,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ req := initTestGetContainerRequest(t, contID)
+
+ err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
+ aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied)
+ require.ErrorContains(t, err, aErr.Error())
+ })
+}
+
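+// initTestGetContainerRequest builds a GetRequest for the given container,
+// signed with a freshly generated key.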
+func initTestGetContainerRequest(t *testing.T, contID cid.ID) *container.GetRequest {
+ req := &container.GetRequest{}
+ req.SetBody(&container.GetRequestBody{})
+ var refContID refs.ContainerID
+ contID.WriteToV2(&refContID)
+ req.GetBody().SetContainerID(&refContID)
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+ return req
+}
+
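+// initTestNetmap seeds the stub with a single-node netmap for both the
+// current and the previous epoch.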
+func initTestNetmap(netmapStub *netmapStub) {
+ netmapStub.currentEpoch = 100
+ netmapStub.netmaps = map[uint64]*netmap.NetMap{}
+ var testNetmap netmap.NetMap
+ testNetmap.SetEpoch(netmapStub.currentEpoch)
+ testNetmap.SetNodes([]netmap.NodeInfo{{}})
+ netmapStub.netmaps[netmapStub.currentEpoch] = &testNetmap
+ netmapStub.netmaps[netmapStub.currentEpoch-1] = &testNetmap
+}
+
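+// initTestContainer returns a random container with a "REP 1" placement
+// policy, optionally bound to the test domain.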
+func initTestContainer(t *testing.T, isDomainSet bool) (cid.ID, cnrSDK.Container) {
+ contID := cidtest.ID()
+ testContainer := containertest.Container()
+ pp := netmap.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("REP 1"))
+ testContainer.SetPlacementPolicy(pp)
+ if isDomainSet {
+		// with a domain set, the container belongs to the test namespace;
+		// otherwise it stays in the root namespace
+ var domain cnrSDK.Domain
+ domain.SetName(testDomainName)
+ domain.SetZone(testDomainZone)
+ cnrSDK.WriteDomain(&testContainer, domain)
+ }
+ return contID, testContainer
+}
+
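+// initPutRequest builds a signed PutRequest carrying a container session
+// token issued by a separate session key.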
+func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.PutRequest {
+ req := &container.PutRequest{}
+ req.SetBody(&container.PutRequestBody{})
+ var reqCont container.Container
+ testContainer.WriteToV2(&reqCont)
+ req.GetBody().SetContainer(&reqCont)
+
+ sessionPK, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ sToken := sessiontest.ContainerSigned()
+ sToken.ApplyOnlyTo(cid.ID{})
+ require.NoError(t, sToken.Sign(sessionPK.PrivateKey))
+ var sTokenV2 session.Token
+ sToken.WriteToV2(&sTokenV2)
+ metaHeader := new(session.RequestMetaHeader)
+ metaHeader.SetSessionToken(&sTokenV2)
+ req.SetMetaHeader(metaHeader)
+
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+ return req
+}
+
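+// initOwnerIDScriptHash returns the script hash of the container owner's user ID.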
+func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 {
+	owner := testContainer.Owner()
+	return owner.ScriptHash()
+}
+
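+// initActorOwnerScriptHashes derives script hashes for the actor and owner
+// keys and asserts they differ.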
+func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) {
+ var actorUserID user.ID
+ user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey()))
+ actorScriptHash = actorUserID.ScriptHash()
+
+ var ownerUserID user.ID
+ user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey()))
+ ownerScriptHash = ownerUserID.ScriptHash()
+ require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String())
+ return
+}
+
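+// initListRequest builds a ListRequest for the owner's containers, signed by
+// the actor's key.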
+func initListRequest(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) *container.ListRequest {
+ var ownerUserID user.ID
+ user.IDFromKey(&ownerUserID, ownerPK.PrivateKey.PublicKey)
+
+ req := &container.ListRequest{}
+ req.SetBody(&container.ListRequestBody{})
+ var ownerID refs.OwnerID
+ ownerUserID.WriteToV2(&ownerID)
+ req.GetBody().SetOwnerID(&ownerID)
+
+ require.NoError(t, signature.SignServiceMessage(&actorPK.PrivateKey, req))
+ return req
+}
+
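+// addDefaultAllowGetPolicy allows GetContainer on all containers so that only
+// the rule under test can deny access.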
+func addDefaultAllowGetPolicy(t *testing.T, e engine.Engine, contID cid.ID) {
+ _, _, err := e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{
+ Names: []string{
+ nativeschema.MethodGetContainer,
+ },
+ },
+ Resources: chain.Resources{
+ Names: []string{
+ nativeschema.ResourceFormatAllContainers,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+}
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
new file mode 100644
index 0000000000..b235efa3c5
--- /dev/null
+++ b/pkg/services/container/audit.go
@@ -0,0 +1,86 @@
+package container
+
+import (
+ "context"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ container_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+var _ Server = (*auditService)(nil)
+
+type auditService struct {
+ next Server
+ log *logger.Logger
+ enabled *atomic.Bool
+}
+
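+// NewAuditService returns a Server wrapper that writes an audit record for
+// each request when auditing is enabled.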
+func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Server {
+ return &auditService{
+ next: next,
+ log: log,
+ enabled: enabled,
+ }
+}
+
+// Delete implements Server.
+func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
+ res, err := a.next.Delete(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
+
+ return res, err
+}
+
+// Get implements Server.
+func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
+ res, err := a.next.Get(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
+ return res, err
+}
+
+// List implements Server.
+func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
+ res, err := a.next.List(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
+ return res, err
+}
+
+// ListStream implements Server.
+func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ err := a.next.ListStream(req, stream)
+ if !a.enabled.Load() {
+ return err
+ }
+ audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
+ return err
+}
+
+// Put implements Server.
+func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
+ res, err := a.next.Put(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+ audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req,
+ audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
+ return res, err
+}
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index b4705d2587..cdd0d25142 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -4,8 +4,9 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type ServiceExecutor interface {
@@ -13,20 +14,22 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
- SetExtendedACL(context.Context, *session.Token, *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error)
- GetExtendedACL(context.Context, *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error)
+ ListStream(context.Context, *container.ListStreamRequest, ListStream) error
}
type executorSvc struct {
Server
exec ServiceExecutor
+
+ respSvc *response.Service
}
// NewExecutionService wraps ServiceExecutor and returns Container Service interface.
-func NewExecutionService(exec ServiceExecutor) Server {
+func NewExecutionService(exec ServiceExecutor, respSvc *response.Service) Server {
return &executorSvc{
- exec: exec,
+ exec: exec,
+ respSvc: respSvc,
}
}
@@ -44,6 +47,7 @@ func (s *executorSvc) Put(ctx context.Context, req *container.PutRequest) (*cont
resp := new(container.PutResponse)
resp.SetBody(respBody)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
@@ -61,6 +65,7 @@ func (s *executorSvc) Delete(ctx context.Context, req *container.DeleteRequest)
resp := new(container.DeleteResponse)
resp.SetBody(respBody)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
@@ -73,6 +78,7 @@ func (s *executorSvc) Get(ctx context.Context, req *container.GetRequest) (*cont
resp := new(container.GetResponse)
resp.SetBody(respBody)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
@@ -85,34 +91,14 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
resp := new(container.ListResponse)
resp.SetBody(respBody)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
-func (s *executorSvc) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- meta := req.GetMetaHeader()
- for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
- meta = origin
- }
-
- respBody, err := s.exec.SetExtendedACL(ctx, meta.GetSessionToken(), req.GetBody())
+func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ err := s.exec.ListStream(stream.Context(), req, stream)
if err != nil {
- return nil, fmt.Errorf("could not execute SetEACL request: %w", err)
+ return fmt.Errorf("could not execute ListStream request: %w", err)
}
-
- resp := new(container.SetExtendedACLResponse)
- resp.SetBody(respBody)
-
- return resp, nil
-}
-
-func (s *executorSvc) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- respBody, err := s.exec.GetExtendedACL(ctx, req.GetBody())
- if err != nil {
- return nil, fmt.Errorf("could not execute GetEACL request: %w", err)
- }
-
- resp := new(container.GetExtendedACLResponse)
- resp.SetBody(respBody)
-
- return resp, nil
+ return nil
}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index 42035c8d08..eaa608eba1 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -5,17 +5,18 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
+var errMissingUserID = errors.New("missing user ID")
+
type morphExecutor struct {
rdr Reader
wrt Writer
@@ -24,22 +25,20 @@ type morphExecutor struct {
// Reader is an interface of read-only container storage.
type Reader interface {
containercore.Source
- containercore.EACLSource
- // List returns a list of container identifiers belonging
+ // ContainersOf returns a list of container identifiers belonging
// to the specified user of FrostFS system. Returns the identifiers
// of all FrostFS containers if pointer to owner identifier is nil.
- List(*user.ID) ([]cid.ID, error)
+ ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
+ IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
}
// Writer is an interface of container storage updater.
type Writer interface {
// Put stores specified container in the side chain.
- Put(containercore.Container) (*cid.ID, error)
+ Put(context.Context, containercore.Container) (*cid.ID, error)
// Delete removes specified container from the side chain.
- Delete(containercore.RemovalWitness) error
- // PutEACL updates extended ACL table of specified container in the side chain.
- PutEACL(containercore.EACL) error
+ Delete(context.Context, containercore.RemovalWitness) error
}
func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
@@ -49,10 +48,10 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
+func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
sigV2 := body.GetSignature()
if sigV2 == nil {
- // TODO(@cthulhu-rider): #1387 use "const" error
+ // TODO(@cthulhu-rider): #468 use "const" error
return nil, errors.New("missing signature")
}
@@ -82,7 +81,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
}
}
- idCnr, err := s.wrt.Put(cnr)
+ idCnr, err := s.wrt.Put(ctx, cnr)
if err != nil {
return nil, err
}
@@ -96,7 +95,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
return res, nil
}
-func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
+func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -109,8 +108,6 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- sig := body.GetSignature().GetSign()
-
var tok *session.Container
if tokV2 != nil {
@@ -124,11 +121,11 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *
var rmWitness containercore.RemovalWitness
- rmWitness.SetContainerID(id)
- rmWitness.SetSignature(sig)
- rmWitness.SetSessionToken(tok)
+ rmWitness.ContainerID = id
+ rmWitness.Signature = body.GetSignature()
+ rmWitness.SessionToken = tok
- err = s.wrt.Delete(rmWitness)
+ err = s.wrt.Delete(ctx, rmWitness)
if err != nil {
return nil, err
}
@@ -149,7 +146,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- cnr, err := s.rdr.Get(id)
+ cnr, err := s.rdr.Get(ctx, id)
if err != nil {
return nil, err
}
@@ -179,7 +176,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
- return nil, fmt.Errorf("missing user ID")
+ return nil, errMissingUserID
}
var id user.ID
@@ -189,7 +186,7 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return nil, fmt.Errorf("invalid user ID: %w", err)
}
- cnrs, err := s.rdr.List(&id)
+ cnrs, err := s.rdr.ContainersOf(ctx, &id)
if err != nil {
return nil, err
}
@@ -205,72 +202,55 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return res, nil
}
-func (s *morphExecutor) SetExtendedACL(ctx context.Context, tokV2 *sessionV2.Token, body *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error) {
- sigV2 := body.GetSignature()
- if sigV2 == nil {
- // TODO(@cthulhu-rider): #1387 use "const" error
- return nil, errors.New("missing signature")
- }
-
- eaclInfo := containercore.EACL{
- Value: eaclSDK.NewTableFromV2(body.GetEACL()),
- }
-
- err := eaclInfo.Signature.ReadFromV2(*sigV2)
- if err != nil {
- return nil, fmt.Errorf("can't read signature: %w", err)
- }
-
- if tokV2 != nil {
- eaclInfo.Session = new(session.Container)
-
- err := eaclInfo.Session.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
- }
-
- err = s.wrt.PutEACL(eaclInfo)
- if err != nil {
- return nil, err
- }
-
- return new(container.SetExtendedACLResponseBody), nil
-}
-
-func (s *morphExecutor) GetExtendedACL(ctx context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) {
- idV2 := body.GetContainerID()
+func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error {
+ body := req.GetBody()
+ idV2 := body.GetOwnerID()
if idV2 == nil {
- return nil, errors.New("missing container ID")
+ return errMissingUserID
}
- var id cid.ID
+ var id user.ID
err := id.ReadFromV2(*idV2)
if err != nil {
- return nil, fmt.Errorf("invalid container ID: %w", err)
+ return fmt.Errorf("invalid user ID: %w", err)
}
- eaclInfo, err := s.rdr.GetEACL(id)
- if err != nil {
- return nil, err
+ resBody := new(container.ListStreamResponseBody)
+ r := new(container.ListStreamResponse)
+ r.SetBody(resBody)
+
+ var cidList []refs.ContainerID
+
+	// Maximum number of container IDs to send in a single stream response.
+ const batchSize = 1000
+
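+	// processCID buffers container IDs and flushes a stream response once a
+	// full batch has been collected.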
+ processCID := func(id cid.ID) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var refID refs.ContainerID
+ id.WriteToV2(&refID)
+ cidList = append(cidList, refID)
+ if len(cidList) == batchSize {
+ r.GetBody().SetContainerIDs(cidList)
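+			// Reset the length but keep the capacity: Send is expected to marshal
+			// the message before returning, so the backing array can be reused
+			// for the next batch.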
+ cidList = cidList[:0]
+ return stream.Send(r)
+ }
+ return nil
}
- var sigV2 refs.Signature
- eaclInfo.Signature.WriteToV2(&sigV2)
-
- var tokV2 *sessionV2.Token
-
- if eaclInfo.Session != nil {
- tokV2 = new(sessionV2.Token)
-
- eaclInfo.Session.WriteToV2(tokV2)
+ if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil {
+ return err
}
- res := new(container.GetExtendedACLResponseBody)
- res.SetEACL(eaclInfo.Value.ToV2())
- res.SetSignature(&sigV2)
- res.SetSessionToken(tokV2)
+ if len(cidList) > 0 {
+ r.GetBody().SetContainerIDs(cidList)
+ return stream.Send(r)
+ }
- return res, nil
+ return nil
}
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index a270ee856a..1f6fdb0bec 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -4,12 +4,12 @@ import (
"context"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
containerSvcMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test"
@@ -24,15 +24,11 @@ type mock struct {
containerSvcMorph.Reader
}
-func (m mock) Put(_ containerCore.Container) (*cid.ID, error) {
+func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) {
return new(cid.ID), nil
}
-func (m mock) Delete(_ containerCore.RemovalWitness) error {
- return nil
-}
-
-func (m mock) PutEACL(_ containerCore.EACL) error {
+func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error {
return nil
}
@@ -51,7 +47,8 @@ func TestInvalidToken(t *testing.T) {
sign := func(reqBody interface {
StableMarshal([]byte) []byte
SetSignature(signature *refs.Signature)
- }) {
+ },
+ ) {
signer := frostfsecdsa.Signer(priv.PrivateKey)
var sig frostfscrypto.Signature
require.NoError(t, sig.Calculate(signer, reqBody.StableMarshal(nil)))
@@ -95,17 +92,6 @@ func TestInvalidToken(t *testing.T) {
return
},
},
- {
- name: "setEACL",
- op: func(e containerSvc.ServiceExecutor, tokV2 *session.Token) (err error) {
- var reqBody container.SetExtendedACLRequestBody
- reqBody.SetSignature(new(refs.Signature))
- sign(&reqBody)
-
- _, err = e.SetExtendedACL(context.TODO(), tokV2, &reqBody)
- return
- },
- },
}
for _, test := range tests {
diff --git a/pkg/services/container/response.go b/pkg/services/container/response.go
deleted file mode 100644
index 1389745378..0000000000
--- a/pkg/services/container/response.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package container
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
-)
-
-type responseService struct {
- respSvc *response.Service
-
- svc Server
-}
-
-// NewResponseService returns container service instance that passes internal service
-// call to response service.
-func NewResponseService(cnrSvc Server, respSvc *response.Service) Server {
- return &responseService{
- respSvc: respSvc,
- svc: cnrSvc,
- }
-}
-
-func (s *responseService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Put(ctx, req.(*container.PutRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*container.PutResponse), nil
-}
-
-func (s *responseService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Delete(ctx, req.(*container.DeleteRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*container.DeleteResponse), nil
-}
-
-func (s *responseService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Get(ctx, req.(*container.GetRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*container.GetResponse), nil
-}
-
-func (s *responseService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.List(ctx, req.(*container.ListRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*container.ListResponse), nil
-}
-
-func (s *responseService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.SetExtendedACL(ctx, req.(*container.SetExtendedACLRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*container.SetExtendedACLResponse), nil
-}
-
-func (s *responseService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.GetExtendedACL(ctx, req.(*container.GetExtendedACLRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*container.GetExtendedACLResponse), nil
-}
-
-func (s *responseService) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceUsedSpace(ctx, req.(*container.AnnounceUsedSpaceRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*container.AnnounceUsedSpaceResponse), nil
-}
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index 052a8c945c..d9208077d1 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -3,7 +3,8 @@ package container
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
// Server is an interface of the FrostFS API Container service server.
@@ -12,7 +13,11 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
- SetExtendedACL(context.Context, *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error)
- GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error)
- AnnounceUsedSpace(context.Context, *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error)
+ ListStream(*container.ListStreamRequest, ListStream) error
+}
+
+// ListStream is an interface of FrostFS API v2 compatible container list streamer.
+type ListStream interface {
+ util.ServerStream
+ Send(*container.ListStreamResponse) error
}
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index 9e77e2e214..85fe7ae873 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
type signService struct {
@@ -22,113 +22,74 @@ func NewSignService(key *ecdsa.PrivateKey, svc Server) Server {
}
func (s *signService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Put(ctx, req.(*container.PutRequest))
- },
- func() util.ResponseMessage {
- return new(container.PutResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.PutResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*container.PutResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.Put(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
func (s *signService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Delete(ctx, req.(*container.DeleteRequest))
- },
- func() util.ResponseMessage {
- return new(container.DeleteResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.DeleteResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*container.DeleteResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.Delete(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
func (s *signService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Get(ctx, req.(*container.GetRequest))
- },
- func() util.ResponseMessage {
- return new(container.GetResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.GetResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*container.GetResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.Get(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
func (s *signService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.List(ctx, req.(*container.ListRequest))
- },
- func() util.ResponseMessage {
- return new(container.ListResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.ListResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*container.ListResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *signService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.SetExtendedACL(ctx, req.(*container.SetExtendedACLRequest))
- },
- func() util.ResponseMessage {
- return new(container.SetExtendedACLResponse)
- },
- )
- if err != nil {
- return nil, err
+func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
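+ // A verification failure is reported to the client as a signed response carrying the error status.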
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.ListStreamResponse)
+ _ = s.sigSvc.SignResponse(resp, err)
+ return stream.Send(resp)
}
- return resp.(*container.SetExtendedACLResponse), nil
-}
-
-func (s *signService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.GetExtendedACL(ctx, req.(*container.GetExtendedACLRequest))
- },
- func() util.ResponseMessage {
- return new(container.GetExtendedACLResponse)
- },
- )
- if err != nil {
- return nil, err
+ ss := &listStreamSigner{
+ ListStream: stream,
+ sigSvc: s.sigSvc,
}
-
- return resp.(*container.GetExtendedACLResponse), nil
-}
-
-func (s *signService) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceUsedSpace(ctx, req.(*container.AnnounceUsedSpaceRequest))
- },
- func() util.ResponseMessage {
- return new(container.AnnounceUsedSpaceResponse)
- },
- )
- if err != nil {
- return nil, err
+ err := s.svc.ListStream(req, ss)
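+ // Guarantee that the client receives at least one signed response, carrying the error status if the handler failed.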
+ if err != nil || !ss.nonEmptyResp {
+ return ss.send(new(container.ListStreamResponse), err)
}
-
- return resp.(*container.AnnounceUsedSpaceResponse), nil
+ return nil
+}
+
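+// listStreamSigner signs every response sent over the stream and records whether anything has been sent yet.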
+type listStreamSigner struct {
+ ListStream
+ sigSvc *util.SignService
+
+ nonEmptyResp bool // set on first Send call
+}
+
+func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error {
+ s.nonEmptyResp = true
+ return s.send(resp, nil)
+}
+
+func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error {
+ if err := s.sigSvc.SignResponse(resp, err); err != nil {
+ return err
+ }
+ return s.ListStream.Send(resp)
}
diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go
new file mode 100644
index 0000000000..4f8708da7e
--- /dev/null
+++ b/pkg/services/container/transport_splitter.go
@@ -0,0 +1,92 @@
+package container
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+)
+
+type (
+ TransportSplitter struct {
+ next Server
+
+ respSvc *response.Service
+ cnrAmount uint32
+ }
+
+ listStreamMsgSizeCtrl struct {
+ util.ServerStream
+ stream ListStream
+ respSvc *response.Service
+ cnrAmount uint32
+ }
+)
+
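+// NewSplitterService wraps the next Server so that each streamed list response carries at most cnrAmount container IDs.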
+func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server {
+ return &TransportSplitter{
+ next: next,
+ respSvc: respSvc,
+ cnrAmount: cnrAmount,
+ }
+}
+
+func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
+ return s.next.Put(ctx, req)
+}
+
+func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
+ return s.next.Delete(ctx, req)
+}
+
+func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
+ return s.next.Get(ctx, req)
+}
+
+func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
+ return s.next.List(ctx, req)
+}
+
+func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+ return s.next.ListStream(req, &listStreamMsgSizeCtrl{
+ ServerStream: stream,
+ stream: stream,
+ respSvc: s.respSvc,
+ cnrAmount: s.cnrAmount,
+ })
+}
+
+func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error {
+ s.respSvc.SetMeta(resp)
+ body := resp.GetBody()
+ ids := body.GetContainerIDs()
+
+ var newResp *container.ListStreamResponse
+
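+ // Re-slice the ID list into chunks of at most cnrAmount entries, sending each chunk as a separate message.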
+ for {
+ if newResp == nil {
+ newResp = new(container.ListStreamResponse)
+ newResp.SetBody(body)
+ }
+
+ cut := min(s.cnrAmount, uint32(len(ids)))
+
+ body.SetContainerIDs(ids[:cut])
+ newResp.SetMetaHeader(resp.GetMetaHeader())
+ newResp.SetVerificationHeader(resp.GetVerificationHeader())
+
+ if err := s.stream.Send(newResp); err != nil {
+ return fmt.Errorf("TransportSplitter: %w", err)
+ }
+
+ ids = ids[cut:]
+
+ if len(ids) == 0 {
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/services/control/common_test.go b/pkg/services/control/common_test.go
deleted file mode 100644
index bc512b4bec..0000000000
--- a/pkg/services/control/common_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package control_test
-
-import (
- "crypto/rand"
- "testing"
-
- "github.com/mr-tron/base58"
- "github.com/stretchr/testify/require"
- "google.golang.org/protobuf/proto"
-)
-
-type protoMessage interface {
- StableMarshal([]byte) []byte
- proto.Message
-}
-
-func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) {
- require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2))
-
- require.True(t, cmp(m1, m2))
-}
-
-func testData(sz int) []byte {
- d := make([]byte, sz)
-
- _, _ = rand.Read(d)
-
- return d
-}
-
-func testString() string {
- return base58.Encode(testData(10))
-}
diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go
index 833288bb76..37daf67be5 100644
--- a/pkg/services/control/convert.go
+++ b/pkg/services/control/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
)
type requestWrapper struct {
@@ -14,189 +14,26 @@ func (w *requestWrapper) ToGRPCMessage() grpc.Message {
return w.m
}
-type healthCheckResponseWrapper struct {
- m *HealthCheckResponse
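+// responseWrapper adapts any generated response type to the gRPC message conversion interfaces, replacing the former per-type wrappers.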
+type responseWrapper[T grpc.Message] struct {
+ message *T
}
-func (w *healthCheckResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.m
+func newResponseWrapper[T grpc.Message]() *responseWrapper[T] {
+ return &responseWrapper[T]{
+ message: new(T),
+ }
}
-func (w *healthCheckResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- var ok bool
+func (w *responseWrapper[T]) ToGRPCMessage() grpc.Message {
+ return w.message
+}
- w.m, ok = m.(*HealthCheckResponse)
+func (w *responseWrapper[T]) FromGRPCMessage(m grpc.Message) error {
+ response, ok := m.(*T)
if !ok {
- return message.NewUnexpectedMessageType(m, w.m)
+ return message.NewUnexpectedMessageType(m, w.message)
}
- return nil
-}
-
-type setNetmapStatusResponseWrapper struct {
- message.Message
- m *SetNetmapStatusResponse
-}
-
-func (w *setNetmapStatusResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.m
-}
-
-func (w *setNetmapStatusResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- var ok bool
-
- w.m, ok = m.(*SetNetmapStatusResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, w.m)
- }
-
- return nil
-}
-
-type dropObjectsResponseWrapper struct {
- message.Message
- m *DropObjectsResponse
-}
-
-func (w *dropObjectsResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.m
-}
-
-func (w *dropObjectsResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- var ok bool
-
- w.m, ok = m.(*DropObjectsResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, w.m)
- }
-
- return nil
-}
-
-type listShardsResponseWrapper struct {
- m *ListShardsResponse
-}
-
-func (w *listShardsResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.m
-}
-
-func (w *listShardsResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- var ok bool
-
- w.m, ok = m.(*ListShardsResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, w.m)
- }
-
- return nil
-}
-
-type setShardModeResponseWrapper struct {
- m *SetShardModeResponse
-}
-
-func (w *setShardModeResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.m
-}
-
-func (w *setShardModeResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- var ok bool
-
- w.m, ok = m.(*SetShardModeResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, w.m)
- }
-
- return nil
-}
-
-type dumpShardResponseWrapper struct {
- *DumpShardResponse
-}
-
-func (w *dumpShardResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.DumpShardResponse
-}
-
-func (w *dumpShardResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- r, ok := m.(*DumpShardResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, (*DumpShardResponse)(nil))
- }
-
- w.DumpShardResponse = r
- return nil
-}
-
-type restoreShardResponseWrapper struct {
- *RestoreShardResponse
-}
-
-func (w *restoreShardResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.RestoreShardResponse
-}
-
-func (w *restoreShardResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- r, ok := m.(*RestoreShardResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, (*RestoreShardResponse)(nil))
- }
-
- w.RestoreShardResponse = r
- return nil
-}
-
-type synchronizeTreeResponseWrapper struct {
- *SynchronizeTreeResponse
-}
-
-func (w *synchronizeTreeResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.SynchronizeTreeResponse
-}
-
-func (w *synchronizeTreeResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- r, ok := m.(*SynchronizeTreeResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, (*SynchronizeTreeResponse)(nil))
- }
-
- w.SynchronizeTreeResponse = r
- return nil
-}
-
-type evacuateShardResponseWrapper struct {
- *EvacuateShardResponse
-}
-
-func (w *evacuateShardResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.EvacuateShardResponse
-}
-
-func (w *evacuateShardResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- r, ok := m.(*EvacuateShardResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, (*EvacuateShardResponse)(nil))
- }
-
- w.EvacuateShardResponse = r
- return nil
-}
-
-type flushCacheResponseWrapper struct {
- *FlushCacheResponse
-}
-
-func (w *flushCacheResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.FlushCacheResponse
-}
-
-func (w *flushCacheResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- r, ok := m.(*FlushCacheResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, (*FlushCacheResponse)(nil))
- }
-
- w.FlushCacheResponse = r
+ w.message = response
return nil
}
diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go
index 01bc487241..024676b87a 100644
--- a/pkg/services/control/ir/convert.go
+++ b/pkg/services/control/ir/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
)
type requestWrapper struct {
@@ -14,18 +14,18 @@ func (w *requestWrapper) ToGRPCMessage() grpc.Message {
return w.m
}
-type healthCheckResponseWrapper struct {
- m *HealthCheckResponse
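+// responseWrapper generalizes the former per-type response wrappers over any gRPC message type M.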
+type responseWrapper[M grpc.Message] struct {
+ m M
}
-func (w *healthCheckResponseWrapper) ToGRPCMessage() grpc.Message {
+func (w *responseWrapper[M]) ToGRPCMessage() grpc.Message {
return w.m
}
-func (w *healthCheckResponseWrapper) FromGRPCMessage(m grpc.Message) error {
+func (w *responseWrapper[M]) FromGRPCMessage(m grpc.Message) error {
var ok bool
- w.m, ok = m.(*HealthCheckResponse)
+ w.m, ok = m.(M)
if !ok {
return message.NewUnexpectedMessageType(m, w.m)
}
diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go
index a8b16b6079..62f800d999 100644
--- a/pkg/services/control/ir/rpc.go
+++ b/pkg/services/control/ir/rpc.go
@@ -1,14 +1,18 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
)
const serviceName = "ircontrol.ControlService"
const (
- rpcHealthCheck = "HealthCheck"
+ rpcHealthCheck = "HealthCheck"
+ rpcTickEpoch = "TickEpoch"
+ rpcRemoveNode = "RemoveNode"
+ rpcRemoveContainer = "RemoveContainer"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -17,15 +21,45 @@ func HealthCheck(
req *HealthCheckRequest,
opts ...client.CallOption,
) (*HealthCheckResponse, error) {
- wResp := &healthCheckResponseWrapper{
- m: new(HealthCheckResponse),
+ return sendUnary[HealthCheckRequest, HealthCheckResponse](cli, rpcHealthCheck, req, opts...)
+}
+
+// TickEpoch executes ControlService.TickEpoch RPC.
+func TickEpoch(
+ cli *client.Client,
+ req *TickEpochRequest,
+ opts ...client.CallOption,
+) (*TickEpochResponse, error) {
+ return sendUnary[TickEpochRequest, TickEpochResponse](cli, rpcTickEpoch, req, opts...)
+}
+
+func RemoveNode(
+ cli *client.Client,
+ req *RemoveNodeRequest,
+ opts ...client.CallOption,
+) (*RemoveNodeResponse, error) {
+ return sendUnary[RemoveNodeRequest, RemoveNodeResponse](cli, rpcRemoveNode, req, opts...)
+}
+
+func RemoveContainer(
+ cli *client.Client,
+ req *RemoveContainerRequest,
+ opts ...client.CallOption,
+) (*RemoveContainerResponse, error) {
+ return sendUnary[RemoveContainerRequest, RemoveContainerResponse](cli, rpcRemoveContainer, req, opts...)
+}
+
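+// sendUnary wraps the typed request and response messages and performs a unary call to the named control service method.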
+func sendUnary[I, O grpc.Message](cli *client.Client, rpcName string, req *I, opts ...client.CallOption) (*O, error) {
+ var resp O
+ wResp := &responseWrapper[*O]{
+ m: &resp,
}
wReq := &requestWrapper{
m: req,
}
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcHealthCheck), wReq, wResp, opts...)
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcName), wReq, wResp, opts...)
if err != nil {
return nil, err
}
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
new file mode 100644
index 0000000000..d9f65a2fc1
--- /dev/null
+++ b/pkg/services/control/ir/server/audit.go
@@ -0,0 +1,108 @@
+package control
+
+import (
+ "context"
+ "encoding/hex"
+ "strings"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
+ control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+var _ control.ControlServiceServer = (*auditService)(nil)
+
+type auditService struct {
+ next *Server
+ log *logger.Logger
+ enabled *atomic.Bool
+}
+
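+// NewAuditService returns a ControlServiceServer that writes audit log records for incoming requests while enabled is set.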
+func NewAuditService(next *Server, log *logger.Logger, enabled *atomic.Bool) control.ControlServiceServer {
+ return &auditService{
+ next: next,
+ log: log,
+ enabled: enabled,
+ }
+}
+
+// HealthCheck implements control.ControlServiceServer.
+func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
+ res, err := a.next.HealthCheck(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
+ return res, err
+}
+
+// RemoveContainer implements control.ControlServiceServer.
+func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
+ res, err := a.next.RemoveContainer(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+
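+ // Build a human-readable audit target from the container ID and/or owner present in the request.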
+ sb := &strings.Builder{}
+ var withContainer bool
+ if len(req.GetBody().GetContainerId()) > 0 {
+ withContainer = true
+ sb.WriteString("containerID:")
+ var containerID cid.ID
+ if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
+ sb.WriteString(audit.InvalidValue)
+ } else {
+ sb.WriteString(containerID.EncodeToString())
+ }
+ }
+
+ if len(req.GetBody().GetOwner()) > 0 {
+ if withContainer {
+ sb.WriteString(";")
+ }
+ sb.WriteString("owner:")
+
+ var ownerID refs.OwnerID
+ if err := ownerID.Unmarshal(req.GetBody().GetOwner()); err != nil {
+ sb.WriteString(audit.InvalidValue)
+ } else {
+ var owner user.ID
+ if err := owner.ReadFromV2(ownerID); err != nil {
+ sb.WriteString(audit.InvalidValue)
+ } else {
+ sb.WriteString(owner.EncodeToString())
+ }
+ }
+ }
+
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
+ return res, err
+}
+
+// RemoveNode implements control.ControlServiceServer.
+func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+ res, err := a.next.RemoveNode(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
+ audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
+ return res, err
+}
+
+// TickEpoch implements control.ControlServiceServer.
+func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+ res, err := a.next.TickEpoch(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+
+ audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
+ nil, err == nil)
+ return res, err
+}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 986da90f1b..0509d2646c 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -1,9 +1,16 @@
package control
import (
+ "bytes"
"context"
+ "fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -12,12 +19,10 @@ import (
//
// If request is not signed with a key from white list, permission error returns.
func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
- // verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
- // create and fill response
resp := new(control.HealthCheckResponse)
body := new(control.HealthCheckResponse_Body)
@@ -25,10 +30,147 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
body.SetHealthStatus(s.prm.healthChecker.HealthStatus())
- // sign the response
if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return resp, nil
}
+
+// TickEpoch forces a new epoch.
+//
+// If the request is not signed with a key from the white list, a permission error is returned.
+func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ resp := new(control.TickEpochResponse)
+ resp.SetBody(new(control.TickEpochResponse_Body))
+
+ epoch, err := s.netmapClient.Epoch(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("getting current epoch: %w", err)
+ }
+
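+ // The returned vub is the "valid until block" height of the epoch transaction; the request body may override it.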
+ vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
+ if err != nil {
+ return nil, fmt.Errorf("forcing new epoch: %w", err)
+ }
+ resp.Body.Vub = vub
+
+ if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ return resp, nil
+}
+
+// RemoveNode forces a node removal.
+//
+// If the request is not signed with a key from the white list, a permission error is returned.
+func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ resp := new(control.RemoveNodeResponse)
+ resp.SetBody(new(control.RemoveNodeResponse_Body))
+
+ nm, err := s.netmapClient.NetMap(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("getting netmap: %w", err)
+ }
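+ // Locate the node to remove by its public key in the current network map.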
+ var nodeInfo netmap.NodeInfo
+ for _, info := range nm.Nodes() {
+ if bytes.Equal(info.PublicKey(), req.GetBody().GetKey()) {
+ nodeInfo = info
+ break
+ }
+ }
+ if len(nodeInfo.PublicKey()) == 0 {
+ return nil, status.Error(codes.NotFound, "no such node")
+ }
+ if nodeInfo.Status().IsOffline() {
+ return nil, status.Error(codes.FailedPrecondition, "node is already offline")
+ }
+
+ vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
+ if err != nil {
+ return nil, fmt.Errorf("forcing node removal: %w", err)
+ }
+ resp.Body.Vub = vub
+
+ if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ return resp, nil
+}
+
+// RemoveContainer forces a container removal.
+func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ if len(req.GetBody().GetContainerId()) > 0 && len(req.GetBody().GetOwner()) > 0 {
+ return nil, status.Error(codes.InvalidArgument, "specify the owner and container at the same time is not allowed")
+ }
+ var vub uint32
+ if len(req.GetBody().GetContainerId()) > 0 {
+ var containerID cid.ID
+ if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
+ return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
+ }
+ var err error
+ vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ var ownerID refs.OwnerID
+ if err := ownerID.Unmarshal(req.GetBody().GetOwner()); err != nil {
+ return nil, status.Error(codes.InvalidArgument, "failed to parse ownerID: %s"+err.Error())
+ }
+ var owner user.ID
+ if err := owner.ReadFromV2(ownerID); err != nil {
+ return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
+ }
+
+ cids, err := s.containerClient.ContainersOf(ctx, &owner)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get owner's containers: %w", err)
+ }
+
+ for _, containerID := range cids {
+ vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ resp := &control.RemoveContainerResponse{
+ Body: &control.RemoveContainerResponse_Body{
+ Vub: vub,
+ },
+ }
+ if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
+ var prm container.DeletePrm
+ prm.SetCID(containerID[:])
+ prm.SetControlTX(true)
+ prm.SetVUB(vub)
+
+ vub, err := s.containerClient.Delete(ctx, prm)
+ if err != nil {
+ return 0, fmt.Errorf("forcing container removal: %w", err)
+ }
+ return vub, nil
+}
diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go
index 0c2de53006..9d5cfefc8e 100644
--- a/pkg/services/control/ir/server/deps.go
+++ b/pkg/services/control/ir/server/deps.go
@@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current health status of the IR application.
+ // HealthStatus must calculate and return current health status of the IR application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
index c75c1504e1..0cfca71c15 100644
--- a/pkg/services/control/ir/server/server.go
+++ b/pkg/services/control/ir/server/server.go
@@ -2,6 +2,9 @@ package control
import (
"fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
)
// Server is an entity that serves
@@ -10,9 +13,10 @@ import (
// To gain access to the service, any request must be
// signed with a key from the white list.
type Server struct {
- prm Prm
-
- allowedKeys [][]byte
+ prm Prm
+ netmapClient *netmap.Client
+ containerClient *container.Client
+ allowedKeys [][]byte
}
func panicOnPrmValue(n string, v any) {
@@ -29,10 +33,9 @@ func panicOnPrmValue(n string, v any) {
// Forms white list from all keys specified via
// WithAllowedKeys option and a public key of
// the parameterized private key.
-func New(prm Prm, opts ...Option) *Server {
+func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server {
// verify required parameters
- switch {
- case prm.healthChecker == nil:
+ if prm.healthChecker == nil {
panicOnPrmValue("health checker", prm.healthChecker)
}
@@ -44,7 +47,9 @@ func New(prm Prm, opts ...Option) *Server {
}
return &Server{
- prm: prm,
+ prm: prm,
+ netmapClient: netmapClient,
+ containerClient: containerClient,
allowedKeys: append(o.allowedKeys, prm.key.PublicKey().Bytes()),
}
diff --git a/pkg/services/control/ir/server/sign.go b/pkg/services/control/ir/server/sign.go
index 4ada984682..d39f6d5f95 100644
--- a/pkg/services/control/ir/server/sign.go
+++ b/pkg/services/control/ir/server/sign.go
@@ -6,8 +6,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
@@ -24,7 +24,7 @@ var errDisallowedKey = errors.New("key is not in the allowed list")
func (s *Server) isValidRequest(req SignedMessage) error {
sign := req.GetSignature()
if sign == nil {
- // TODO(@cthulhu-rider): #1387 use "const" error
+ // TODO(@cthulhu-rider): #468 use "const" error
return errors.New("missing signature")
}
@@ -50,7 +50,7 @@ func (s *Server) isValidRequest(req SignedMessage) error {
return fmt.Errorf("marshal request body: %w", err)
}
- // TODO(@cthulhu-rider): #1387 use Signature message from NeoFS API to avoid conversion
+ // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion
var sigV2 refs.Signature
sigV2.SetKey(sign.GetKey())
sigV2.SetSign(sign.GetSign())
@@ -62,7 +62,7 @@ func (s *Server) isValidRequest(req SignedMessage) error {
}
if !sig.Verify(binBody) {
- // TODO(@cthulhu-rider): #1387 use "const" error
+ // TODO(@cthulhu-rider): #468 use "const" error
return errors.New("invalid signature")
}
@@ -83,7 +83,7 @@ func SignMessage(key *ecdsa.PrivateKey, msg SignedMessage) error {
return fmt.Errorf("calculate signature: %w", err)
}
- // TODO(@cthulhu-rider): #1387 use Signature message from NeoFS API to avoid conversion
+ // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion
var sigV2 refs.Signature
sig.WriteToV2(&sigV2)
diff --git a/pkg/services/control/ir/service.go b/pkg/services/control/ir/service.go
deleted file mode 100644
index dc04e49043..0000000000
--- a/pkg/services/control/ir/service.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package control
-
-// SetBody sets health check request body.
-func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetHealthStatus sets health status of the IR application.
-func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
- if x != nil {
- x.HealthStatus = v
- }
-}
-
-// SetBody sets health check response body.
-func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
diff --git a/pkg/services/control/ir/service.pb.go b/pkg/services/control/ir/service.pb.go
deleted file mode 100644
index 9f28347065..0000000000
--- a/pkg/services/control/ir/service.pb.go
+++ /dev/null
@@ -1,382 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.21.12
-// source: pkg/services/control/ir/service.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Health check request.
-type HealthCheckRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check request message.
- Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- // Should be signed by node key or one of
- // the keys configured by the node.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckRequest) Reset() {
- *x = HealthCheckRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest) ProtoMessage() {}
-
-func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check response.
-type HealthCheckResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check response message.
- Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckResponse) Reset() {
- *x = HealthCheckResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse) ProtoMessage() {}
-
-func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check request body.
-type HealthCheckRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthCheckRequest_Body) Reset() {
- *x = HealthCheckRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest_Body) ProtoMessage() {}
-
-func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Health check response body
-type HealthCheckResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Health status of IR node application.
- HealthStatus HealthStatus `protobuf:"varint,1,opt,name=health_status,json=healthStatus,proto3,enum=ircontrol.HealthStatus" json:"health_status,omitempty"`
-}
-
-func (x *HealthCheckResponse_Body) Reset() {
- *x = HealthCheckResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse_Body) ProtoMessage() {}
-
-func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
- if x != nil {
- return x.HealthStatus
- }
- return HealthStatus_HEALTH_STATUS_UNDEFINED
-}
-
-var File_pkg_services_control_ir_service_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_ir_service_proto_rawDesc = []byte{
- 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x1a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69,
- 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
- 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f,
- 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x44, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x3c, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x32, 0x5e, 0x0a,
- 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
- 0x4c, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1d,
- 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e,
- 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x44, 0x5a,
- 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66,
- 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66,
- 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_ir_service_proto_rawDescOnce sync.Once
- file_pkg_services_control_ir_service_proto_rawDescData = file_pkg_services_control_ir_service_proto_rawDesc
-)
-
-func file_pkg_services_control_ir_service_proto_rawDescGZIP() []byte {
- file_pkg_services_control_ir_service_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_ir_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_service_proto_rawDescData)
- })
- return file_pkg_services_control_ir_service_proto_rawDescData
-}
-
-var file_pkg_services_control_ir_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_pkg_services_control_ir_service_proto_goTypes = []interface{}{
- (*HealthCheckRequest)(nil), // 0: ircontrol.HealthCheckRequest
- (*HealthCheckResponse)(nil), // 1: ircontrol.HealthCheckResponse
- (*HealthCheckRequest_Body)(nil), // 2: ircontrol.HealthCheckRequest.Body
- (*HealthCheckResponse_Body)(nil), // 3: ircontrol.HealthCheckResponse.Body
- (*Signature)(nil), // 4: ircontrol.Signature
- (HealthStatus)(0), // 5: ircontrol.HealthStatus
-}
-var file_pkg_services_control_ir_service_proto_depIdxs = []int32{
- 2, // 0: ircontrol.HealthCheckRequest.body:type_name -> ircontrol.HealthCheckRequest.Body
- 4, // 1: ircontrol.HealthCheckRequest.signature:type_name -> ircontrol.Signature
- 3, // 2: ircontrol.HealthCheckResponse.body:type_name -> ircontrol.HealthCheckResponse.Body
- 4, // 3: ircontrol.HealthCheckResponse.signature:type_name -> ircontrol.Signature
- 5, // 4: ircontrol.HealthCheckResponse.Body.health_status:type_name -> ircontrol.HealthStatus
- 0, // 5: ircontrol.ControlService.HealthCheck:input_type -> ircontrol.HealthCheckRequest
- 1, // 6: ircontrol.ControlService.HealthCheck:output_type -> ircontrol.HealthCheckResponse
- 6, // [6:7] is the sub-list for method output_type
- 5, // [5:6] is the sub-list for method input_type
- 5, // [5:5] is the sub-list for extension type_name
- 5, // [5:5] is the sub-list for extension extendee
- 0, // [0:5] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_ir_service_proto_init() }
-func file_pkg_services_control_ir_service_proto_init() {
- if File_pkg_services_control_ir_service_proto != nil {
- return
- }
- file_pkg_services_control_ir_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_ir_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_ir_service_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 4,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_pkg_services_control_ir_service_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_ir_service_proto_depIdxs,
- MessageInfos: file_pkg_services_control_ir_service_proto_msgTypes,
- }.Build()
- File_pkg_services_control_ir_service_proto = out.File
- file_pkg_services_control_ir_service_proto_rawDesc = nil
- file_pkg_services_control_ir_service_proto_goTypes = nil
- file_pkg_services_control_ir_service_proto_depIdxs = nil
-}
diff --git a/pkg/services/control/ir/service.proto b/pkg/services/control/ir/service.proto
index 5f99be16b7..fa58db568b 100644
--- a/pkg/services/control/ir/service.proto
+++ b/pkg/services/control/ir/service.proto
@@ -6,38 +6,108 @@ import "pkg/services/control/ir/types.proto";
option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/ir/control";
-// `ControlService` provides an interface for internal work with the Inner Ring node.
+// `ControlService` provides an interface for internal work with the Inner Ring
+// node.
service ControlService {
- // Performs health check of the IR node.
- rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse);
+ // Performs health check of the IR node.
+ rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse);
+ // Forces a new epoch to be signaled by the IR node with high probability.
+ rpc TickEpoch(TickEpochRequest) returns (TickEpochResponse);
+ // Forces a node removal to be signaled by the IR node with high probability.
+ rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse);
+ // Forces a container removal to be signaled by the IR node with high
+ // probability.
+ rpc RemoveContainer(RemoveContainerRequest) returns (RemoveContainerResponse);
}
// Health check request.
message HealthCheckRequest {
- // Health check request body.
- message Body {
- }
+ // Health check request body.
+ message Body {}
- // Body of health check request message.
- Body body = 1;
+ // Body of health check request message.
+ Body body = 1;
- // Body signature.
- // Should be signed by node key or one of
- // the keys configured by the node.
- Signature signature = 2;
+ // Body signature.
+ // Should be signed by node key or one of
+ // the keys configured by the node.
+ Signature signature = 2;
}
// Health check response.
message HealthCheckResponse {
- // Health check response body
- message Body {
- // Health status of IR node application.
- HealthStatus health_status = 1;
- }
+ // Health check response body
+ message Body {
+ // Health status of IR node application.
+ HealthStatus health_status = 1;
+ }
- // Body of health check response message.
- Body body = 1;
+ // Body of health check response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
+}
+
+message TickEpochRequest {
+ message Body {
+ // Valid until block value override.
+ uint32 vub = 1;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message TickEpochResponse {
+ message Body {
+ // Valid until block value for transaction.
+ uint32 vub = 1;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message RemoveNodeRequest {
+ message Body {
+ bytes key = 1;
+ // Valid until block value override.
+ uint32 vub = 2;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message RemoveNodeResponse {
+ message Body {
+ // Valid until block value for transaction.
+ uint32 vub = 1;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message RemoveContainerRequest {
+ message Body {
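+ // Identifier of the container to remove. Mutually exclusive with owner.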
+ bytes container_id = 1;
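+ // Owner whose containers are all removed. Mutually exclusive with container_id.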
+ bytes owner = 2;
+ // Valid until block value override.
+ uint32 vub = 3;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message RemoveContainerResponse {
+ message Body {
+ // Valid until block value for transaction.
+ uint32 vub = 1;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
}
diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go
index f6dd94b3a0..d277462634 100644
--- a/pkg/services/control/ir/service_frostfs.pb.go
+++ b/pkg/services/control/ir/service_frostfs.pb.go
@@ -2,57 +2,137 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type HealthCheckRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Marshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckRequest struct {
+ Body *HealthCheckRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil)
+ _ json.Marshaler = (*HealthCheckRequest)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -68,71 +148,344 @@ func (x *HealthCheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
+ x.Body = v
+}
+func (x *HealthCheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckRequest_Body
+ f = new(HealthCheckRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse_Body struct {
+ HealthStatus HealthStatus `json:"healthStatus"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Marshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.EnumSize(1, int32(x.HealthStatus))
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.HealthStatus))
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.HealthStatus) != 0 {
+ mm.AppendInt32(1, int32(x.HealthStatus))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // HealthStatus
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "HealthStatus")
+ }
+ x.HealthStatus = HealthStatus(data)
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return 0
+}
+func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
+ x.HealthStatus = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"healthStatus\":"
+ out.RawString(prefix)
+ v := int32(x.HealthStatus)
+ if vv, ok := HealthStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "healthStatus":
+ {
+ var f HealthStatus
+ var parsedValue HealthStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := HealthStatus_value[v]; ok {
+ parsedValue = HealthStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = HealthStatus(vv)
+ case float64:
+ parsedValue = HealthStatus(v)
+ }
+ f = parsedValue
+ x.HealthStatus = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse struct {
+ Body *HealthCheckResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil)
+ _ json.Marshaler = (*HealthCheckResponse)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -148,9 +501,2338 @@ func (x *HealthCheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
+ x.Body = v
+}
+func (x *HealthCheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckResponse_Body
+ f = new(HealthCheckResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochRequest_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochRequest_Body)(nil)
+ _ json.Marshaler = (*TickEpochRequest_Body)(nil)
+ _ json.Unmarshaler = (*TickEpochRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *TickEpochRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.UInt32Size(1, x.Vub)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *TickEpochRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *TickEpochRequest_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *TickEpochRequest_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochRequest struct {
+ Body *TickEpochRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochRequest)(nil)
+ _ json.Marshaler = (*TickEpochRequest)(nil)
+ _ json.Unmarshaler = (*TickEpochRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *TickEpochRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *TickEpochRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *TickEpochRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *TickEpochRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TickEpochRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) {
+ x.Body = v
+}
+func (x *TickEpochRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TickEpochRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TickEpochRequest_Body
+ f = new(TickEpochRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochResponse_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochResponse_Body)(nil)
+ _ json.Marshaler = (*TickEpochResponse_Body)(nil)
+ _ json.Unmarshaler = (*TickEpochResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *TickEpochResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.UInt32Size(1, x.Vub)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *TickEpochResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *TickEpochResponse_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *TickEpochResponse_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochResponse struct {
+ Body *TickEpochResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochResponse)(nil)
+ _ json.Marshaler = (*TickEpochResponse)(nil)
+ _ json.Unmarshaler = (*TickEpochResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *TickEpochResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *TickEpochResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *TickEpochResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *TickEpochResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TickEpochResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) {
+ x.Body = v
+}
+func (x *TickEpochResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TickEpochResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TickEpochResponse_Body
+ f = new(TickEpochResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeRequest_Body struct {
+ Key []byte `json:"key"`
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveNodeRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveNodeRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveNodeRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.Key)
+ size += proto.UInt32Size(2, x.Vub)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveNodeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(2, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeRequest_Body) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *RemoveNodeRequest_Body) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *RemoveNodeRequest_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveNodeRequest_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Key = f
+ }
+ case "vub":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeRequest struct {
+ Body *RemoveNodeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest)(nil)
+ _ json.Marshaler = (*RemoveNodeRequest)(nil)
+ _ json.Unmarshaler = (*RemoveNodeRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveNodeRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveNodeRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveNodeRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveNodeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveNodeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveNodeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveNodeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveNodeRequest_Body
+ f = new(RemoveNodeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeResponse_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveNodeResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveNodeResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveNodeResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.UInt32Size(1, x.Vub)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveNodeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeResponse_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveNodeResponse_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeResponse struct {
+ Body *RemoveNodeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse)(nil)
+ _ json.Marshaler = (*RemoveNodeResponse)(nil)
+ _ json.Unmarshaler = (*RemoveNodeResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveNodeResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveNodeResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveNodeResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveNodeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveNodeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveNodeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveNodeResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveNodeResponse_Body
+ f = new(RemoveNodeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ Owner []byte `json:"owner"`
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveContainerRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveContainerRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveContainerRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.ContainerId)
+ size += proto.BytesSize(2, x.Owner)
+ size += proto.UInt32Size(3, x.Vub)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveContainerRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.Owner) != 0 {
+ mm.AppendBytes(2, x.Owner)
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(3, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // Owner
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Owner")
+ }
+ x.Owner = data
+ case 3: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *RemoveContainerRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *RemoveContainerRequest_Body) GetOwner() []byte {
+ if x != nil {
+ return x.Owner
+ }
+ return nil
+}
+func (x *RemoveContainerRequest_Body) SetOwner(v []byte) {
+ x.Owner = v
+}
+func (x *RemoveContainerRequest_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveContainerRequest_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"owner\":"
+ out.RawString(prefix)
+ if x.Owner != nil {
+ out.Base64Bytes(x.Owner)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "owner":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Owner = f
+ }
+ case "vub":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerRequest struct {
+ Body *RemoveContainerRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest)(nil)
+ _ json.Marshaler = (*RemoveContainerRequest)(nil)
+ _ json.Unmarshaler = (*RemoveContainerRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveContainerRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveContainerRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveContainerRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveContainerRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveContainerRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerRequest) GetBody() *RemoveContainerRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveContainerRequest) SetBody(v *RemoveContainerRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveContainerRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveContainerRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveContainerRequest_Body
+ f = new(RemoveContainerRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerResponse_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveContainerResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveContainerResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveContainerResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.UInt32Size(1, x.Vub)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveContainerResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerResponse_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveContainerResponse_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerResponse struct {
+ Body *RemoveContainerResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse)(nil)
+ _ json.Marshaler = (*RemoveContainerResponse)(nil)
+ _ json.Unmarshaler = (*RemoveContainerResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveContainerResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveContainerResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveContainerResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveContainerResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveContainerResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerResponse) GetBody() *RemoveContainerResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveContainerResponse) SetBody(v *RemoveContainerResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveContainerResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveContainerResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveContainerResponse_Body
+ f = new(RemoveContainerResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/ir/service_grpc.pb.go b/pkg/services/control/ir/service_grpc.pb.go
index b6bc6fdbab..336bf5f70d 100644
--- a/pkg/services/control/ir/service_grpc.pb.go
+++ b/pkg/services/control/ir/service_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.21.12
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.25.0
// source: pkg/services/control/ir/service.proto
package control
@@ -18,12 +18,26 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
+const (
+ ControlService_HealthCheck_FullMethodName = "/ircontrol.ControlService/HealthCheck"
+ ControlService_TickEpoch_FullMethodName = "/ircontrol.ControlService/TickEpoch"
+ ControlService_RemoveNode_FullMethodName = "/ircontrol.ControlService/RemoveNode"
+ ControlService_RemoveContainer_FullMethodName = "/ircontrol.ControlService/RemoveContainer"
+)
+
// ControlServiceClient is the client API for ControlService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ControlServiceClient interface {
// Performs health check of the IR node.
HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
+ // Forces a new epoch to be signaled by the IR node with high probability.
+ TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error)
+ // Forces a node removal to be signaled by the IR node with high probability.
+ RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error)
+ // Forces a container removal to be signaled by the IR node with high
+ // probability.
+ RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error)
}
type controlServiceClient struct {
@@ -36,7 +50,34 @@ func NewControlServiceClient(cc grpc.ClientConnInterface) ControlServiceClient {
func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, "/ircontrol.ControlService/HealthCheck", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_HealthCheck_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error) {
+ out := new(TickEpochResponse)
+ err := c.cc.Invoke(ctx, ControlService_TickEpoch_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) {
+ out := new(RemoveNodeResponse)
+ err := c.cc.Invoke(ctx, ControlService_RemoveNode_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error) {
+ out := new(RemoveContainerResponse)
+ err := c.cc.Invoke(ctx, ControlService_RemoveContainer_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -49,6 +90,13 @@ func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckR
type ControlServiceServer interface {
// Performs health check of the IR node.
HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+ // Forces a new epoch to be signaled by the IR node with high probability.
+ TickEpoch(context.Context, *TickEpochRequest) (*TickEpochResponse, error)
+ // Forces a node removal to be signaled by the IR node with high probability.
+ RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error)
+ // Forces a container removal to be signaled by the IR node with high
+ // probability.
+ RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -58,6 +106,15 @@ type UnimplementedControlServiceServer struct {
func (UnimplementedControlServiceServer) HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method HealthCheck not implemented")
}
+func (UnimplementedControlServiceServer) TickEpoch(context.Context, *TickEpochRequest) (*TickEpochResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method TickEpoch not implemented")
+}
+func (UnimplementedControlServiceServer) RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RemoveNode not implemented")
+}
+func (UnimplementedControlServiceServer) RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RemoveContainer not implemented")
+}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -80,7 +137,7 @@ func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ircontrol.ControlService/HealthCheck",
+ FullMethod: ControlService_HealthCheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).HealthCheck(ctx, req.(*HealthCheckRequest))
@@ -88,6 +145,60 @@ func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, d
return interceptor(ctx, in, info, handler)
}
+func _ControlService_TickEpoch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TickEpochRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).TickEpoch(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_TickEpoch_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).TickEpoch(ctx, req.(*TickEpochRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_RemoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveNodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).RemoveNode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_RemoveNode_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).RemoveNode(ctx, req.(*RemoveNodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_RemoveContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveContainerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).RemoveContainer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_RemoveContainer_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).RemoveContainer(ctx, req.(*RemoveContainerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -99,6 +210,18 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "HealthCheck",
Handler: _ControlService_HealthCheck_Handler,
},
+ {
+ MethodName: "TickEpoch",
+ Handler: _ControlService_TickEpoch_Handler,
+ },
+ {
+ MethodName: "RemoveNode",
+ Handler: _ControlService_RemoveNode_Handler,
+ },
+ {
+ MethodName: "RemoveContainer",
+ Handler: _ControlService_RemoveContainer_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/ir/service.proto",
diff --git a/pkg/services/control/ir/service_test.go b/pkg/services/control/ir/service_test.go
deleted file mode 100644
index 54eef51484..0000000000
--- a/pkg/services/control/ir/service_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package control_test
-
-import (
- "testing"
-
- control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "github.com/stretchr/testify/require"
- "google.golang.org/protobuf/proto"
-)
-
-type protoMessage interface {
- StableMarshal([]byte) []byte
- proto.Message
-}
-
-func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) {
- require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2))
-
- require.True(t, cmp(m1, m2))
-}
-
-func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateHealthCheckResponseBody(),
- new(control.HealthCheckResponse_Body),
- func(m1, m2 protoMessage) bool {
- return equalHealthCheckResponseBodies(
- m1.(*control.HealthCheckResponse_Body),
- m2.(*control.HealthCheckResponse_Body),
- )
- },
- )
-}
-
-func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body {
- body := new(control.HealthCheckResponse_Body)
- body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN)
-
- return body
-}
-
-func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool {
- return b1.GetHealthStatus() == b2.GetHealthStatus()
-}
diff --git a/pkg/services/control/ir/types.go b/pkg/services/control/ir/types.go
deleted file mode 100644
index 97ffd3ce33..0000000000
--- a/pkg/services/control/ir/types.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package control
-
-// SetKey sets public key used for signing.
-func (x *Signature) SetKey(v []byte) {
- if x != nil {
- x.Key = v
- }
-}
-
-// SetSign sets binary signature.
-func (x *Signature) SetSign(v []byte) {
- if x != nil {
- x.Sign = v
- }
-}
diff --git a/pkg/services/control/ir/types.pb.go b/pkg/services/control/ir/types.pb.go
deleted file mode 100644
index c89cd5a0d1..0000000000
--- a/pkg/services/control/ir/types.pb.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.21.12
-// source: pkg/services/control/ir/types.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Health status of the IR application.
-type HealthStatus int32
-
-const (
- // Undefined status, default value.
- HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
- // IR application is starting.
- HealthStatus_STARTING HealthStatus = 1
- // IR application is started and serves all services.
- HealthStatus_READY HealthStatus = 2
- // IR application is shutting down.
- HealthStatus_SHUTTING_DOWN HealthStatus = 3
-)
-
-// Enum value maps for HealthStatus.
-var (
- HealthStatus_name = map[int32]string{
- 0: "HEALTH_STATUS_UNDEFINED",
- 1: "STARTING",
- 2: "READY",
- 3: "SHUTTING_DOWN",
- }
- HealthStatus_value = map[string]int32{
- "HEALTH_STATUS_UNDEFINED": 0,
- "STARTING": 1,
- "READY": 2,
- "SHUTTING_DOWN": 3,
- }
-)
-
-func (x HealthStatus) Enum() *HealthStatus {
- p := new(HealthStatus)
- *p = x
- return p
-}
-
-func (x HealthStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_ir_types_proto_enumTypes[0].Descriptor()
-}
-
-func (HealthStatus) Type() protoreflect.EnumType {
- return &file_pkg_services_control_ir_types_proto_enumTypes[0]
-}
-
-func (x HealthStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use HealthStatus.Descriptor instead.
-func (HealthStatus) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0}
-}
-
-// Signature of some message.
-type Signature struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Public key used for signing.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Binary signature.
- Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
-}
-
-func (x *Signature) Reset() {
- *x = Signature{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Signature) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Signature) ProtoMessage() {}
-
-func (x *Signature) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
-func (*Signature) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-
-var File_pkg_services_control_ir_types_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_ir_types_proto_rawDesc = []byte{
- 0x0a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2a, 0x57, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c,
- 0x54, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49,
- 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e,
- 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x11,
- 0x0a, 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10,
- 0x03, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73,
- 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c,
- 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
- 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_ir_types_proto_rawDescOnce sync.Once
- file_pkg_services_control_ir_types_proto_rawDescData = file_pkg_services_control_ir_types_proto_rawDesc
-)
-
-func file_pkg_services_control_ir_types_proto_rawDescGZIP() []byte {
- file_pkg_services_control_ir_types_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_ir_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_types_proto_rawDescData)
- })
- return file_pkg_services_control_ir_types_proto_rawDescData
-}
-
-var file_pkg_services_control_ir_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_pkg_services_control_ir_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_pkg_services_control_ir_types_proto_goTypes = []interface{}{
- (HealthStatus)(0), // 0: ircontrol.HealthStatus
- (*Signature)(nil), // 1: ircontrol.Signature
-}
-var file_pkg_services_control_ir_types_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_ir_types_proto_init() }
-func file_pkg_services_control_ir_types_proto_init() {
- if File_pkg_services_control_ir_types_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_ir_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Signature); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_ir_types_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_pkg_services_control_ir_types_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_ir_types_proto_depIdxs,
- EnumInfos: file_pkg_services_control_ir_types_proto_enumTypes,
- MessageInfos: file_pkg_services_control_ir_types_proto_msgTypes,
- }.Build()
- File_pkg_services_control_ir_types_proto = out.File
- file_pkg_services_control_ir_types_proto_rawDesc = nil
- file_pkg_services_control_ir_types_proto_goTypes = nil
- file_pkg_services_control_ir_types_proto_depIdxs = nil
-}
diff --git a/pkg/services/control/ir/types.proto b/pkg/services/control/ir/types.proto
index a6897fad16..901a55918b 100644
--- a/pkg/services/control/ir/types.proto
+++ b/pkg/services/control/ir/types.proto
@@ -6,24 +6,27 @@ option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/ir/
// Signature of some message.
message Signature {
- // Public key used for signing.
- bytes key = 1 [json_name = "key"];
+ // Public key used for signing.
+ bytes key = 1 [ json_name = "key" ];
- // Binary signature.
- bytes sign = 2 [json_name = "signature"];
+ // Binary signature.
+ bytes sign = 2 [ json_name = "signature" ];
}
// Health status of the IR application.
enum HealthStatus {
- // Undefined status, default value.
- HEALTH_STATUS_UNDEFINED = 0;
+ // Undefined status, default value.
+ HEALTH_STATUS_UNDEFINED = 0;
- // IR application is starting.
- STARTING = 1;
+ // IR application is starting.
+ STARTING = 1;
- // IR application is started and serves all services.
- READY = 2;
+ // IR application is started and serves all services.
+ READY = 2;
- // IR application is shutting down.
- SHUTTING_DOWN = 3;
+ // IR application is shutting down.
+ SHUTTING_DOWN = 3;
+
+ // IR application is reconfiguring.
+ RECONFIGURING = 4;
}
diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go
index 50679e785c..407eec6ad8 100644
--- a/pkg/services/control/ir/types_frostfs.pb.go
+++ b/pkg/services/control/ir/types_frostfs.pb.go
@@ -2,34 +2,246 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type HealthStatus int32
+
+const (
+ HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
+ HealthStatus_STARTING HealthStatus = 1
+ HealthStatus_READY HealthStatus = 2
+ HealthStatus_SHUTTING_DOWN HealthStatus = 3
+ HealthStatus_RECONFIGURING HealthStatus = 4
+)
+
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "HEALTH_STATUS_UNDEFINED",
+ 1: "STARTING",
+ 2: "READY",
+ 3: "SHUTTING_DOWN",
+ 4: "RECONFIGURING",
+ }
+ HealthStatus_value = map[string]int32{
+ "HEALTH_STATUS_UNDEFINED": 0,
+ "STARTING": 1,
+ "READY": 2,
+ "SHUTTING_DOWN": 3,
+ "RECONFIGURING": 4,
+ }
+)
+
+func (x HealthStatus) String() string {
+ if v, ok := HealthStatus_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *HealthStatus) FromString(s string) bool {
+ if v, ok := HealthStatus_value[s]; ok {
+ *x = HealthStatus(v)
+ return true
+ }
+ return false
+}
+
+type Signature struct {
+ Key []byte `json:"key"`
+ Sign []byte `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Signature)(nil)
+ _ encoding.ProtoUnmarshaler = (*Signature)(nil)
+ _ json.Marshaler = (*Signature)(nil)
+ _ json.Unmarshaler = (*Signature)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *Signature) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.Key)
size += proto.BytesSize(2, x.Sign)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Signature) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Signature) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if len(x.Sign) != 0 {
+ mm.AppendBytes(2, x.Sign)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Signature")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Sign
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Sign")
+ }
+ x.Sign = data
+ }
+ }
+ return nil
+}
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *Signature) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+func (x *Signature) SetSign(v []byte) {
+ x.Sign = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Signature) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ if x.Sign != nil {
+ out.Base64Bytes(x.Sign)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Signature) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Key = f
+ }
+ case "signature":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Sign = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 0779e177bd..0c4236d0ee 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -1,23 +1,38 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
)
const serviceName = "control.ControlService"
const (
- rpcHealthCheck = "HealthCheck"
- rpcSetNetmapStatus = "SetNetmapStatus"
- rpcDropObjects = "DropObjects"
- rpcListShards = "ListShards"
- rpcSetShardMode = "SetShardMode"
- rpcDumpShard = "DumpShard"
- rpcRestoreShard = "RestoreShard"
- rpcSynchronizeTree = "SynchronizeTree"
- rpcEvacuateShard = "EvacuateShard"
- rpcFlushCache = "FlushCache"
+ rpcHealthCheck = "HealthCheck"
+ rpcSetNetmapStatus = "SetNetmapStatus"
+ rpcGetNetmapStatus = "GetNetmapStatus"
+ rpcDropObjects = "DropObjects"
+ rpcListShards = "ListShards"
+ rpcSetShardMode = "SetShardMode"
+ rpcSynchronizeTree = "SynchronizeTree"
+ rpcStartShardEvacuation = "StartShardEvacuation"
+ rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
+ rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus"
+ rpcStopShardEvacuation = "StopShardEvacuation"
+ rpcFlushCache = "FlushCache"
+ rpcDoctor = "Doctor"
+ rpcAddChainLocalOverride = "AddChainLocalOverride"
+ rpcGetChainLocalOverride = "GetChainLocalOverride"
+ rpcListChainLocalOverrides = "ListChainLocalOverrides"
+ rpcRemoveChainLocalOverride = "RemoveChainLocalOverride"
+ rpcRemoveChainLocalOverridesByTarget = "RemoveChainLocalOverridesByTarget"
+ rpcSealWriteCache = "SealWriteCache"
+ rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
+ rpcDetachShards = "DetachShards"
+ rpcStartShardRebuild = "StartShardRebuild"
+ rpcListShardsForObject = "ListShardsForObject"
)
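+
+// Note: newResponseWrapper, used throughout this file, is a generic
+// constructor defined elsewhere in this package; its definition is not part
+// of this hunk. A minimal sketch, assuming only the usage visible here:
+//
+//	type responseWrapper[M any] struct{ message *M }
+//
+//	func newResponseWrapper[M any]() *responseWrapper[M] {
+//		return &responseWrapper[M]{message: new(M)}
+//	}
+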
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -26,10 +41,7 @@ func HealthCheck(
req *HealthCheckRequest,
opts ...client.CallOption,
) (*HealthCheckResponse, error) {
- wResp := &healthCheckResponseWrapper{
- m: new(HealthCheckResponse),
- }
-
+ wResp := newResponseWrapper[HealthCheckResponse]()
wReq := &requestWrapper{
m: req,
}
@@ -39,7 +51,7 @@ func HealthCheck(
return nil, err
}
- return wResp.m, nil
+ return wResp.message, nil
}
// SetNetmapStatus executes ControlService.SetNetmapStatus RPC.
@@ -48,9 +60,7 @@ func SetNetmapStatus(
req *SetNetmapStatusRequest,
opts ...client.CallOption,
) (*SetNetmapStatusResponse, error) {
- wResp := &setNetmapStatusResponseWrapper{
- m: new(SetNetmapStatusResponse),
- }
+ wResp := newResponseWrapper[SetNetmapStatusResponse]()
wReq := &requestWrapper{
m: req,
@@ -61,7 +71,28 @@ func SetNetmapStatus(
return nil, err
}
- return wResp.m, nil
+ return wResp.message, nil
+}
+
+// GetNetmapStatus executes ControlService.GetNetmapStatus RPC.
+func GetNetmapStatus(
+ _ context.Context,
+ cli *client.Client,
+ req *GetNetmapStatusRequest,
+ opts ...client.CallOption,
+) (*GetNetmapStatusResponse, error) {
+ wResp := newResponseWrapper[GetNetmapStatusResponse]()
+
+ wReq := &requestWrapper{
+ m: req,
+ }
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetNetmapStatus), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
}
// DropObjects executes ControlService.DropObjects RPC.
@@ -70,9 +101,7 @@ func DropObjects(
req *DropObjectsRequest,
opts ...client.CallOption,
) (*DropObjectsResponse, error) {
- wResp := &dropObjectsResponseWrapper{
- m: new(DropObjectsResponse),
- }
+ wResp := newResponseWrapper[DropObjectsResponse]()
wReq := &requestWrapper{
m: req,
@@ -82,7 +111,7 @@ func DropObjects(
return nil, err
}
- return wResp.m, nil
+ return wResp.message, nil
}
// ListShards executes ControlService.ListShards RPC.
@@ -91,9 +120,7 @@ func ListShards(
req *ListShardsRequest,
opts ...client.CallOption,
) (*ListShardsResponse, error) {
- wResp := &listShardsResponseWrapper{
- m: new(ListShardsResponse),
- }
+ wResp := newResponseWrapper[ListShardsResponse]()
wReq := &requestWrapper{
m: req,
@@ -103,7 +130,7 @@ func ListShards(
return nil, err
}
- return wResp.m, nil
+ return wResp.message, nil
}
// SetShardMode executes ControlService.SetShardMode RPC.
@@ -112,9 +139,7 @@ func SetShardMode(
req *SetShardModeRequest,
opts ...client.CallOption,
) (*SetShardModeResponse, error) {
- wResp := &setShardModeResponseWrapper{
- m: new(SetShardModeResponse),
- }
+ wResp := newResponseWrapper[SetShardModeResponse]()
wReq := &requestWrapper{
m: req,
@@ -124,38 +149,12 @@ func SetShardMode(
return nil, err
}
- return wResp.m, nil
-}
-
-// DumpShard executes ControlService.DumpShard RPC.
-func DumpShard(cli *client.Client, req *DumpShardRequest, opts ...client.CallOption) (*DumpShardResponse, error) {
- wResp := &dumpShardResponseWrapper{new(DumpShardResponse)}
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDumpShard), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.DumpShardResponse, nil
-}
-
-// RestoreShard executes ControlService.DumpShard RPC.
-func RestoreShard(cli *client.Client, req *RestoreShardRequest, opts ...client.CallOption) (*RestoreShardResponse, error) {
- wResp := &restoreShardResponseWrapper{new(RestoreShardResponse)}
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRestoreShard), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.RestoreShardResponse, nil
+ return wResp.message, nil
}
// SynchronizeTree executes ControlService.SynchronizeTree RPC.
func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...client.CallOption) (*SynchronizeTreeResponse, error) {
- wResp := &synchronizeTreeResponseWrapper{new(SynchronizeTreeResponse)}
+ wResp := newResponseWrapper[SynchronizeTreeResponse]()
wReq := &requestWrapper{m: req}
err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSynchronizeTree), wReq, wResp, opts...)
@@ -163,25 +162,64 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl
return nil, err
}
- return wResp.SynchronizeTreeResponse, nil
+ return wResp.message, nil
}
-// EvacuateShard executes ControlService.EvacuateShard RPC.
-func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) {
- wResp := &evacuateShardResponseWrapper{new(EvacuateShardResponse)}
+// StartShardEvacuation executes ControlService.StartShardEvacuation RPC.
+func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) {
+ wResp := newResponseWrapper[StartShardEvacuationResponse]()
wReq := &requestWrapper{m: req}
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...)
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardEvacuation), wReq, wResp, opts...)
if err != nil {
return nil, err
}
- return wResp.EvacuateShardResponse, nil
+ return wResp.message, nil
+}
+
+// GetShardEvacuationStatus executes ControlService.GetShardEvacuationStatus RPC.
+func GetShardEvacuationStatus(cli *client.Client, req *GetShardEvacuationStatusRequest, opts ...client.CallOption) (*GetShardEvacuationStatusResponse, error) {
+ wResp := newResponseWrapper[GetShardEvacuationStatusResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetShardEvacuationStatus), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// StopShardEvacuation executes ControlService.StopShardEvacuation RPC.
+func StopShardEvacuation(cli *client.Client, req *StopShardEvacuationRequest, opts ...client.CallOption) (*StopShardEvacuationResponse, error) {
+ wResp := newResponseWrapper[StopShardEvacuationResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStopShardEvacuation), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// ResetShardEvacuationStatus executes ControlService.ResetShardEvacuationStatus RPC.
+func ResetShardEvacuationStatus(cli *client.Client, req *ResetShardEvacuationStatusRequest, opts ...client.CallOption) (*ResetShardEvacuationStatusResponse, error) {
+ wResp := newResponseWrapper[ResetShardEvacuationStatusResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcResetShardEvacuationStatus), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
}
// FlushCache executes ControlService.FlushCache RPC.
func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallOption) (*FlushCacheResponse, error) {
- wResp := &flushCacheResponseWrapper{new(FlushCacheResponse)}
+ wResp := newResponseWrapper[FlushCacheResponse]()
wReq := &requestWrapper{m: req}
err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcFlushCache), wReq, wResp, opts...)
@@ -189,5 +227,160 @@ func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallO
return nil, err
}
- return wResp.FlushCacheResponse, nil
+ return wResp.message, nil
+}
+
+// Doctor executes ControlService.Doctor RPC.
+func Doctor(cli *client.Client, req *DoctorRequest, opts ...client.CallOption) (*DoctorResponse, error) {
+ wResp := newResponseWrapper[DoctorResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDoctor), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// AddChainLocalOverride executes ControlService.AddChainLocalOverride RPC.
+func AddChainLocalOverride(cli *client.Client, req *AddChainLocalOverrideRequest, opts ...client.CallOption) (*AddChainLocalOverrideResponse, error) {
+ wResp := newResponseWrapper[AddChainLocalOverrideResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcAddChainLocalOverride), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// ListChainLocalOverrides executes ControlService.ListChainLocalOverrides RPC.
+func ListChainLocalOverrides(cli *client.Client, req *ListChainLocalOverridesRequest, opts ...client.CallOption) (*ListChainLocalOverridesResponse, error) {
+ wResp := newResponseWrapper[ListChainLocalOverridesResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListChainLocalOverrides), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// ListTargetsLocalOverrides executes ControlService.ListTargetsLocalOverrides RPC.
+func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverridesRequest, opts ...client.CallOption) (*ListTargetsLocalOverridesResponse, error) {
+ wResp := newResponseWrapper[ListTargetsLocalOverridesResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListTargetsLocalOverrides), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// GetChainLocalOverride executes ControlService.GetChainLocalOverride RPC.
+func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) {
+ wResp := newResponseWrapper[GetChainLocalOverrideResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetChainLocalOverride), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
+func RemoveChainLocalOverride(cli *client.Client, req *RemoveChainLocalOverrideRequest, opts ...client.CallOption) (*RemoveChainLocalOverrideResponse, error) {
+ wResp := newResponseWrapper[RemoveChainLocalOverrideResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRemoveChainLocalOverride), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// RemoveChainLocalOverridesByTarget executes ControlService.RemoveChainLocalOverridesByTarget RPC.
+func RemoveChainLocalOverridesByTarget(cli *client.Client, req *RemoveChainLocalOverridesByTargetRequest, opts ...client.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error) {
+ wResp := newResponseWrapper[RemoveChainLocalOverridesByTargetResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRemoveChainLocalOverridesByTarget), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// SealWriteCache executes ControlService.SealWriteCache RPC.
+func SealWriteCache(cli *client.Client, req *SealWriteCacheRequest, opts ...client.CallOption) (*SealWriteCacheResponse, error) {
+ wResp := newResponseWrapper[SealWriteCacheResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSealWriteCache), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// DetachShards executes ControlService.DetachShards RPC.
+func DetachShards(
+ cli *client.Client,
+ req *DetachShardsRequest,
+ opts ...client.CallOption,
+) (*DetachShardsResponse, error) {
+ wResp := newResponseWrapper[DetachShardsResponse]()
+
+ wReq := &requestWrapper{
+ m: req,
+ }
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDetachShards), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// StartShardRebuild executes ControlService.StartShardRebuild RPC.
+func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts ...client.CallOption) (*StartShardRebuildResponse, error) {
+ wResp := newResponseWrapper[StartShardRebuildResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardRebuild), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
+// ListShardsForObject executes ControlService.ListShardsForObject RPC.
+func ListShardsForObject(
+ cli *client.Client,
+ req *ListShardsForObjectRequest,
+ opts ...client.CallOption,
+) (*ListShardsForObjectResponse, error) {
+ wResp := newResponseWrapper[ListShardsForObjectResponse]()
+
+ wReq := &requestWrapper{
+ m: req,
+ }
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
}
diff --git a/pkg/services/control/server/ape/validate.go b/pkg/services/control/server/ape/validate.go
new file mode 100644
index 0000000000..f4aa0399f6
--- /dev/null
+++ b/pkg/services/control/server/ape/validate.go
@@ -0,0 +1,97 @@
+package ape
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+)
+
+var (
+ ErrInvalidResource = errors.New("invalid resource name")
+ ErrUnsupportedPrefix = errors.New("unsupported resource name prefix")
+ ErrInvalidContainerID = errors.New("invalid container id")
+ ErrInvalidObjectID = errors.New("invalid object id")
+ ErrInvalidNamespace = fmt.Errorf("namespace must match regexp: %s", ape.NamespaceNameRegexp.String())
+)
+
+// ValidateResourceName validates the components of a resource name: the
+// namespace, the container ID and the object ID. It also checks that the
+// resource name matches the templates of the policy engine's native scheme.
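+//
+// Accepted shapes, which follow directly from the checks below (an empty
+// namespace segment denotes the root namespace):
+//
+//   <native.ObjectPrefix>/*
+//   <native.ObjectPrefix>/namespace/*
+//   <native.ObjectPrefix>/namespace/CID/OID (or `*` instead of OID)
+//   <native.ContainerPrefix>/*
+//   <native.ContainerPrefix>/namespace/CID (or `*` instead of CID)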
+func ValidateResourceName(name string) error {
+ if after, found := strings.CutPrefix(name, native.ObjectPrefix+"/"); found {
+ return validateObjectResourceName(after)
+ } else if after, found = strings.CutPrefix(name, native.ContainerPrefix+"/"); found {
+ return validateContainerResourceName(after)
+ }
+ return ErrUnsupportedPrefix
+}
+
+// validateObjectResourceName validates a resource name for an object.
+// The name must not include the `native.ObjectPrefix` prefix.
+func validateObjectResourceName(name string) error {
+ if name == "*" {
+ return nil
+ }
+ lexems := strings.Split(name, "/")
+ if len(lexems) == 1 && lexems[0] == "*" {
+ return nil
+ } else if len(lexems) == 2 {
+ // len == 2 means the format is `namespace/*` (an empty namespace denotes the root namespace)
+ if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
+ return ErrInvalidNamespace
+ }
+ if lexems[1] == "*" {
+ return nil
+ }
+ } else if len(lexems) == 3 {
+ // len == 3 means the format is `namespace/CID/OID` (OID may be `*`; an empty namespace denotes the root namespace)
+ if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
+ return ErrInvalidNamespace
+ }
+ var cnr cid.ID
+ err := cnr.DecodeString(lexems[1])
+ if err != nil {
+ return fmt.Errorf("%w: %w", ErrInvalidContainerID, err)
+ }
+ if lexems[2] == "*" {
+ return nil
+ }
+ var objID oid.ID
+ err = objID.DecodeString(lexems[2])
+ if err != nil {
+ return fmt.Errorf("%w: %w", ErrInvalidObjectID, err)
+ }
+ return nil
+ }
+ return ErrInvalidResource
+}
+
+// validateContainerResourceName validates a resource name for a container.
+// The name must not include the `native.ContainerPrefix` prefix.
+func validateContainerResourceName(name string) error {
+ if name == "*" {
+ return nil
+ }
+ lexems := strings.Split(name, "/")
+ if len(lexems) == 1 && lexems[0] == "*" {
+ return nil
+ } else if len(lexems) == 2 {
+ // len == 2 means the format is `namespace/CID` (CID may be `*`; an empty namespace denotes the root namespace)
+ if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
+ return ErrInvalidNamespace
+ }
+ if lexems[1] != "*" {
+ var cnr cid.ID
+ err := cnr.DecodeString(lexems[1])
+ if err != nil {
+ return fmt.Errorf("%w: %w", ErrInvalidContainerID, err)
+ }
+ }
+ return nil
+ }
+ return ErrInvalidResource
+}
diff --git a/pkg/services/control/server/ape/validate_test.go b/pkg/services/control/server/ape/validate_test.go
new file mode 100644
index 0000000000..af811efed6
--- /dev/null
+++ b/pkg/services/control/server/ape/validate_test.go
@@ -0,0 +1,132 @@
+package ape
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/stretchr/testify/require"
+)
+
+func TestValidationOfChainResources(t *testing.T) {
+ tests := [...]struct {
+ testName string
+ resourceName string
+ expectErr error
+ }{
+ {
+ testName: "native object: all objects",
+ resourceName: native.ObjectPrefix + "/*",
+ },
+ {
+ testName: "native object: all objects in namespace",
+ resourceName: native.ObjectPrefix + "/ns/*",
+ },
+ {
+ testName: "native object: all objects in root namespace",
+ resourceName: native.ObjectPrefix + "//*",
+ },
+ {
+ testName: "native object: all objects in namespace/container",
+ resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/*",
+ },
+ {
+ testName: "native object: all objects in root namespace/container",
+ resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/*",
+ },
+ {
+ testName: "native object: object in namespace/container",
+ resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY",
+ },
+ {
+ testName: "native object: object in root namespace/container",
+ resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY",
+ },
+ {
+ testName: "native object: invalid all objects",
+ resourceName: native.ObjectPrefix + "/*12313",
+ expectErr: ErrInvalidResource,
+ },
+ {
+ testName: "native object: all objects in invalid namespace",
+ resourceName: native.ObjectPrefix + "/qwe_123123/*",
+ expectErr: ErrInvalidNamespace,
+ },
+ {
+ testName: "native object: invalid all objects in root namespace",
+ resourceName: native.ObjectPrefix + "//qwe",
+ expectErr: ErrInvalidResource,
+ },
+ {
+ testName: "native object: invalid cid in all objects in root namespace",
+ resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytHqwe/*",
+ expectErr: ErrInvalidContainerID,
+ },
+ {
+ testName: "native object: invalid cid in all objects in namespace",
+ resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytHqwe/*",
+ expectErr: ErrInvalidContainerID,
+ },
+ {
+ testName: "native object: invalid object in namespace/container",
+ resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY111",
+ expectErr: ErrInvalidObjectID,
+ },
+ {
+ testName: "native object: invalid resource",
+ resourceName: native.ObjectPrefix + "/ns/SeHNpifD/AFkrsuvWY111/AFkrsuvWY222",
+ expectErr: ErrInvalidResource,
+ },
+ {
+ testName: "native container: all containers",
+ resourceName: native.ContainerPrefix + "/*",
+ },
+ {
+ testName: "native container: all containers in namespace",
+ resourceName: native.ContainerPrefix + "/ns/*",
+ },
+ {
+ testName: "native container: all containers in root namespace",
+ resourceName: native.ContainerPrefix + "//*",
+ },
+ {
+ testName: "native container: container in namespace",
+ resourceName: native.ContainerPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
+ },
+ {
+ testName: "native container: container in root namespace",
+ resourceName: native.ContainerPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
+ },
+ {
+ testName: "native container: invalid all containers",
+ resourceName: native.ContainerPrefix + "/*asd",
+ expectErr: ErrInvalidResource,
+ },
+ {
+ testName: "native container: invalid resource",
+ resourceName: native.ContainerPrefix + "/ns/cid/cid",
+ expectErr: ErrInvalidResource,
+ },
+ {
+ testName: "native container: invalid container in root namespace",
+ resourceName: native.ContainerPrefix + "//*asd",
+ expectErr: ErrInvalidContainerID,
+ },
+ {
+ testName: "native container: container in invalid namespace",
+ resourceName: native.ContainerPrefix + "/ns_111/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
+ expectErr: ErrInvalidNamespace,
+ },
+ {
+ testName: "unsupported prefix",
+ resourceName: "native:test/ns_111/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
+ expectErr: ErrUnsupportedPrefix,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.testName, func(t *testing.T) {
+ err := ValidateResourceName(test.resourceName)
+ require.ErrorIs(t, err, test.expectErr)
+ })
+ }
+}
diff --git a/pkg/services/control/server/convert.go b/pkg/services/control/server/convert.go
new file mode 100644
index 0000000000..61d7e41c1f
--- /dev/null
+++ b/pkg/services/control/server/convert.go
@@ -0,0 +1,63 @@
+package control
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "github.com/mr-tron/base58"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
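+// stateToResponse converts an engine evacuation state into a control API
+// GetShardEvacuationStatus response.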
+func stateToResponse(state *engine.EvacuationState) (*control.GetShardEvacuationStatusResponse, error) {
+ shardIDs := make([][]byte, 0, len(state.ShardIDs()))
+ for _, shID := range state.ShardIDs() {
+ id, err := base58.Decode(shID)
+ if err != nil {
+ return nil, status.Error(codes.Internal, "invalid shard id format: "+shID)
+ }
+ shardIDs = append(shardIDs, id)
+ }
+ var evacStatus control.GetShardEvacuationStatusResponse_Body_Status
+ switch state.ProcessingStatus() {
+ case engine.EvacuateProcessStateRunning:
+ evacStatus = control.GetShardEvacuationStatusResponse_Body_RUNNING
+ case engine.EvacuateProcessStateCompleted:
+ evacStatus = control.GetShardEvacuationStatusResponse_Body_COMPLETED
+ default:
+ evacStatus = control.GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED
+ }
+ var startedAt *control.GetShardEvacuationStatusResponse_Body_UnixTimestamp
+ if state.StartedAt() != nil {
+ startedAt = &control.GetShardEvacuationStatusResponse_Body_UnixTimestamp{
+ Value: state.StartedAt().Unix(),
+ }
+ }
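+ // Measure the evacuation duration up to now while it is still running,
+ // or up to the finish time once it has completed.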
+ var duration *control.GetShardEvacuationStatusResponse_Body_Duration
+ if state.StartedAt() != nil {
+ end := time.Now().UTC()
+ if state.FinishedAt() != nil {
+ end = *state.FinishedAt()
+ }
+ duration = &control.GetShardEvacuationStatusResponse_Body_Duration{
+ Seconds: int64(end.Sub(*state.StartedAt()).Seconds()),
+ }
+ }
+ return &control.GetShardEvacuationStatusResponse{
+ Body: &control.GetShardEvacuationStatusResponse_Body{
+ Shard_ID: shardIDs,
+ EvacuatedObjects: state.ObjectsEvacuated(),
+ TotalObjects: state.ObjectsTotal(),
+ FailedObjects: state.ObjectsFailed(),
+ Status: evacStatus,
+ StartedAt: startedAt,
+ Duration: duration,
+ ErrorMessage: state.ErrorMessage(),
+ SkippedObjects: state.ObjectsSkipped(),
+ TotalTrees: state.TreesTotal(),
+ EvacuatedTrees: state.TreesEvacuated(),
+ FailedTrees: state.TreesFailed(),
+ },
+ }, nil
+}
diff --git a/pkg/services/control/server/ctrlmessage/sign.go b/pkg/services/control/server/ctrlmessage/sign.go
new file mode 100644
index 0000000000..d9d5c5f5ee
--- /dev/null
+++ b/pkg/services/control/server/ctrlmessage/sign.go
@@ -0,0 +1,44 @@
+package ctrlmessage
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+)
+
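+// SignedMessage is an interface of Control service message.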
+type SignedMessage interface {
+ ReadSignedData([]byte) ([]byte, error)
+ GetSignature() *control.Signature
+ SetSignature(*control.Signature)
+}
+
+// Sign signs a Control service message with the private key.
+func Sign(key *ecdsa.PrivateKey, msg SignedMessage) error {
+ binBody, err := msg.ReadSignedData(nil)
+ if err != nil {
+ return fmt.Errorf("marshal request body: %w", err)
+ }
+
+ var sig frostfscrypto.Signature
+
+ err = sig.Calculate(frostfsecdsa.Signer(*key), binBody)
+ if err != nil {
+ return fmt.Errorf("calculate signature: %w", err)
+ }
+
+ // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion
+ var sigV2 refs.Signature
+ sig.WriteToV2(&sigV2)
+
+ var sigControl control.Signature
+ sigControl.SetKey(sigV2.GetKey())
+ sigControl.SetSign(sigV2.GetSign())
+
+ msg.SetSignature(&sigControl)
+
+ return nil
+}
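+
+// A typical usage sketch (illustrative): control handlers sign every response
+// body before returning it, e.g.
+//
+//	resp := &control.DoctorResponse{Body: &control.DoctorResponse_Body{}}
+//	if err := ctrlmessage.Sign(s.key, resp); err != nil {
+//		return nil, status.Error(codes.Internal, err.Error())
+//	}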
diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go
new file mode 100644
index 0000000000..ffd36962ba
--- /dev/null
+++ b/pkg/services/control/server/detach_shards.go
@@ -0,0 +1,38 @@
+package control
+
+import (
+ "context"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ shardIDs := s.getShardIDList(req.GetBody().GetShard_ID())
+
+ if err := s.s.DetachShards(ctx, shardIDs); err != nil {
+ if errors.As(err, new(logicerr.Logical)) {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.DetachShardsResponse{
+ Body: &control.DetachShardsResponse_Body{},
+ }
+
+ if err = ctrlmessage.Sign(s.key, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ return resp, nil
+}
diff --git a/pkg/services/control/server/doctor.go b/pkg/services/control/server/doctor.go
new file mode 100644
index 0000000000..80041de44e
--- /dev/null
+++ b/pkg/services/control/server/doctor.go
@@ -0,0 +1,38 @@
+package control
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*control.DoctorResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ if !req.GetBody().GetRemoveDuplicates() {
+ return nil, status.Error(codes.InvalidArgument, "operation not specified")
+ }
+
+ var prm engine.RemoveDuplicatesPrm
+ prm.Concurrency = int(req.GetBody().GetConcurrency())
+
+ err = s.s.RemoveDuplicates(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.DoctorResponse{Body: &control.DoctorResponse_Body{}}
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
diff --git a/pkg/services/control/server/dump.go b/pkg/services/control/server/dump.go
deleted file mode 100644
index 28be02aa4e..0000000000
--- a/pkg/services/control/server/dump.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) DumpShard(_ context.Context, req *control.DumpShardRequest) (*control.DumpShardResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID())
-
- var prm shard.DumpPrm
- prm.WithPath(req.GetBody().GetFilepath())
- prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
-
- err = s.s.DumpShard(shardID, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := new(control.DumpShardResponse)
- resp.SetBody(new(control.DumpShardResponse_Body))
-
- err = SignMessage(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
deleted file mode 100644
index 6c064efa3a..0000000000
--- a/pkg/services/control/server/evacuate.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package control
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) EvacuateShard(_ context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- var prm engine.EvacuateShardPrm
- prm.WithShardIDList(s.getShardIDList(req.GetBody().GetShard_ID()))
- prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
- prm.WithFaultHandler(s.replicate)
-
- res, err := s.s.Evacuate(prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := &control.EvacuateShardResponse{
- Body: &control.EvacuateShardResponse_Body{
- Count: uint32(res.Count()),
- },
- }
-
- err = SignMessage(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) replicate(addr oid.Address, obj *objectSDK.Object) error {
- cid, ok := obj.ContainerID()
- if !ok {
- // Return nil to prevent situations where a shard can't be evacuated
- // because of a single bad/corrupted object.
- return nil
- }
-
- nm, err := s.netMapSrc.GetNetMap(0)
- if err != nil {
- return err
- }
-
- c, err := s.cnrSrc.Get(cid)
- if err != nil {
- return err
- }
-
- binCnr := make([]byte, sha256.Size)
- cid.Encode(binCnr)
-
- ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
- if err != nil {
- return fmt.Errorf("can't build a list of container nodes")
- }
-
- nodes := placement.FlattenNodes(ns)
- bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ {
- if bytes.Equal(nodes[i].PublicKey(), bs) {
- copy(nodes[i:], nodes[i+1:])
- nodes = nodes[:len(nodes)-1]
- }
- }
-
- var res replicatorResult
- var task replicator.Task
- task.SetObject(obj)
- task.SetObjectAddress(addr)
- task.SetCopiesNumber(1)
- task.SetNodes(nodes)
- s.replicator.HandleTask(context.TODO(), task, &res)
-
- if res.count == 0 {
- return errors.New("object was not replicated")
- }
- return nil
-}
-
-type replicatorResult struct {
- count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
- r.count++
-}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
new file mode 100644
index 0000000000..f3ba9015ed
--- /dev/null
+++ b/pkg/services/control/server/evacuate_async.go
@@ -0,0 +1,280 @@
+package control
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
+
+func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ if req.GetBody().GetScope() == uint32(control.StartShardEvacuationRequest_Body_NONE) {
+ return nil, status.Error(codes.InvalidArgument, "no evacuation scope")
+ }
+
+ prm := engine.EvacuateShardPrm{
+ ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ ObjectsHandler: s.replicateObject,
+ TreeHandler: s.replicateTree,
+ Scope: engine.EvacuateScope(req.GetBody().GetScope()),
+ ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
+ ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
+ RepOneOnly: req.GetBody().GetRepOneOnly(),
+ }
+
+ if err = s.s.Evacuate(ctx, prm); err != nil {
+ var logicalErr logicerr.Logical
+ if errors.As(err, &logicalErr) {
+ return nil, status.Error(codes.Aborted, err.Error())
+ }
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.StartShardEvacuationResponse{
+ Body: &control.StartShardEvacuationResponse_Body{},
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) GetShardEvacuationStatus(ctx context.Context, req *control.GetShardEvacuationStatusRequest) (*control.GetShardEvacuationStatusResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ state, err := s.s.GetEvacuationState(ctx)
+ if err != nil {
+ var logicalErr logicerr.Logical
+ if errors.As(err, &logicalErr) {
+ return nil, status.Error(codes.Aborted, err.Error())
+ }
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp, err := stateToResponse(state)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) StopShardEvacuation(ctx context.Context, req *control.StopShardEvacuationRequest) (*control.StopShardEvacuationResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ err = s.s.EnqueRunningEvacuationStop(ctx)
+ if err != nil {
+ var logicalErr logicerr.Logical
+ if errors.As(err, &logicalErr) {
+ return nil, status.Error(codes.Aborted, err.Error())
+ }
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.StopShardEvacuationResponse{
+ Body: &control.StopShardEvacuationResponse_Body{},
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ s.s.ResetEvacuationStatusForShards()
+
+ return resp, nil
+}
+
+func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.ResetShardEvacuationStatusRequest) (*control.ResetShardEvacuationStatusResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ err = s.s.ResetEvacuationStatus(ctx)
+ if err != nil {
+ var logicalErr logicerr.Logical
+ if errors.As(err, &logicalErr) {
+ return nil, status.Error(codes.Aborted, err.Error())
+ }
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.ResetShardEvacuationStatusResponse{
+ Body: &control.ResetShardEvacuationStatusResponse_Body{},
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
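+// replicateObject replicates an evacuated object to another node of its
+// container; it reports whether the object was actually replicated.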
+func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ cid, ok := obj.ContainerID()
+ if !ok {
+ // Return a nil error to prevent situations where a shard can't be
+ // evacuated because of a single bad/corrupted object.
+ return false, nil
+ }
+
+ nodes, err := s.getContainerNodes(ctx, cid)
+ if err != nil {
+ return false, err
+ }
+
+ if len(nodes) == 0 {
+ return false, nil
+ }
+
+ var res replicatorResult
+ task := replicator.Task{
+ NumCopies: 1,
+ Addr: addr,
+ Obj: obj,
+ Nodes: nodes,
+ }
+ s.replicator.HandleReplicationTask(ctx, task, &res)
+
+ if res.count == 0 {
+ return false, errors.New("object was not replicated")
+ }
+ return true, nil
+}
+
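+// replicateTree pushes the full operation log of the tree to container nodes,
+// stopping at the first node that accepts it; it returns that node's public key.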
+func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
+ nodes, err := s.getContainerNodes(ctx, contID)
+ if err != nil {
+ return false, "", err
+ }
+ if len(nodes) == 0 {
+ return false, "", nil
+ }
+
+ for _, node := range nodes {
+ err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
+ if err == nil {
+ return true, hex.EncodeToString(node.PublicKey()), nil
+ }
+ }
+ return false, "", err
+}
+
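+// replicateTreeToNode replays the tree operation log to the given node one
+// operation at a time, in log order.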
+func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
+ rawCID := make([]byte, sha256.Size)
+ contID.Encode(rawCID)
+
+ var height uint64
+ for {
+ op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
+ if err != nil {
+ return err
+ }
+
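+ // A zero timestamp means the operation log is exhausted:
+ // the whole tree has been replayed.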
+ if op.Time == 0 {
+ return nil
+ }
+
+ req := &tree.ApplyRequest{
+ Body: &tree.ApplyRequest_Body{
+ ContainerId: rawCID,
+ TreeId: treeID,
+ Operation: &tree.LogMove{
+ ParentId: op.Parent,
+ Meta: op.Bytes(),
+ ChildId: op.Child,
+ },
+ },
+ }
+
+ err = tree.SignMessage(req, s.key)
+ if err != nil {
+ return fmt.Errorf("can't message apply request: %w", err)
+ }
+
+ err = s.treeService.ReplicateTreeOp(ctx, node, req)
+ if err != nil {
+ return err
+ }
+
+ height = op.Time + 1
+ }
+}
+
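+// getContainerNodes resolves the placement of the container in the current
+// network map and returns the candidate nodes, excluding this node itself.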
+func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) {
+ nm, err := s.netMapSrc.GetNetMap(ctx, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := s.cnrSrc.Get(ctx, contID)
+ if err != nil {
+ return nil, err
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ contID.Encode(binCnr)
+
+ ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
+ if err != nil {
+ return nil, errFailedToBuildListOfContainerNodes
+ }
+
+ nodes := placement.FlattenNodes(ns)
+ bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
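+ // Exclude this node from the candidates: evacuated data must be moved
+ // to other container nodes.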
+ for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
+ if bytes.Equal(nodes[i].PublicKey(), bs) {
+ copy(nodes[i:], nodes[i+1:])
+ nodes = nodes[:len(nodes)-1]
+ }
+ }
+ return nodes, nil
+}
+
+type replicatorResult struct {
+ count int
+}
+
+// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
+ r.count++
+}
diff --git a/pkg/services/control/server/flush_cache.go b/pkg/services/control/server/flush_cache.go
index fdfd136a6d..031002d717 100644
--- a/pkg/services/control/server/flush_cache.go
+++ b/pkg/services/control/server/flush_cache.go
@@ -5,11 +5,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
-func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) (*control.FlushCacheResponse, error) {
+func (s *Server) FlushCache(ctx context.Context, req *control.FlushCacheRequest) (*control.FlushCacheResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -18,8 +19,9 @@ func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) (
for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
var prm engine.FlushWriteCachePrm
prm.SetShardID(shardID)
+ prm.SetSeal(req.GetBody().GetSeal())
- _, err = s.s.FlushWriteCache(prm)
+ _, err = s.s.FlushWriteCache(ctx, prm)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -27,7 +29,7 @@ func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) (
resp := &control.FlushCacheResponse{Body: &control.FlushCacheResponse_Body{}}
- err = SignMessage(s.key, resp)
+ err = ctrlmessage.Sign(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index ea1f626f65..a8ef7809ea 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -17,7 +18,7 @@ import (
//
// If some address is not a valid object address in a binary format, an error returns.
// If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) DropObjects(_ context.Context, req *control.DropObjectsRequest) (*control.DropObjectsResponse, error) {
+func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsRequest) (*control.DropObjectsResponse, error) {
// verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -41,8 +42,7 @@ func (s *Server) DropObjects(_ context.Context, req *control.DropObjectsRequest)
prm.WithForceRemoval()
prm.WithAddress(addrList[i])
- _, err := s.s.Delete(prm)
- if err != nil && firstErr == nil {
+ if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
firstErr = err
}
}
@@ -58,7 +58,7 @@ func (s *Server) DropObjects(_ context.Context, req *control.DropObjectsRequest)
resp.SetBody(body)
// sign the response
- if err := SignMessage(s.key, resp); err != nil {
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
new file mode 100644
index 0000000000..5e04969100
--- /dev/null
+++ b/pkg/services/control/server/get_netmap_status.go
@@ -0,0 +1,35 @@
+package control
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// GetNetmapStatus returns the current netmap status and epoch of the node in the FrostFS network.
+func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &control.GetNetmapStatusResponse{
+ Body: &control.GetNetmapStatusResponse_Body{
+ Status: st,
+ Epoch: epoch,
+ },
+ }
+
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ return resp, nil
+}
diff --git a/pkg/services/control/server/healthcheck.go b/pkg/services/control/server/healthcheck.go
index 9e87caa49c..121c51280b 100644
--- a/pkg/services/control/server/healthcheck.go
+++ b/pkg/services/control/server/healthcheck.go
@@ -4,6 +4,7 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -27,7 +28,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
body.SetHealthStatus(s.healthChecker.HealthStatus())
// sign the response
- if err := SignMessage(s.key, resp); err != nil {
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go
index a020547a2a..efe2754eaa 100644
--- a/pkg/services/control/server/list_shards.go
+++ b/pkg/services/control/server/list_shards.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -24,15 +25,15 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
info := s.s.DumpInfo()
- shardInfos := make([]*control.ShardInfo, 0, len(info.Shards))
+ shardInfos := make([]control.ShardInfo, 0, len(info.Shards))
for _, sh := range info.Shards {
si := new(control.ShardInfo)
- si.SetID(*sh.ID)
+ si.SetShard_ID(*sh.ID)
si.SetMetabasePath(sh.MetaBaseInfo.Path)
si.Blobstor = blobstorInfoToProto(sh.BlobStorInfo)
- si.SetWriteCachePath(sh.WriteCacheInfo.Path)
+ si.SetWritecachePath(sh.WriteCacheInfo.Path)
si.SetPiloramaPath(sh.PiloramaInfo.Path)
var m control.ShardMode
@@ -52,24 +53,25 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
si.SetMode(m)
si.SetErrorCount(sh.ErrorCount)
+ si.SetEvacuationInProgress(sh.EvacuationInProgress)
- shardInfos = append(shardInfos, si)
+ shardInfos = append(shardInfos, *si)
}
body.SetShards(shardInfos)
// sign the response
- if err := SignMessage(s.key, resp); err != nil {
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return resp, nil
}
-func blobstorInfoToProto(info blobstor.Info) []*control.BlobstorInfo {
- res := make([]*control.BlobstorInfo, len(info.SubStorages))
+func blobstorInfoToProto(info blobstor.Info) []control.BlobstorInfo {
+ res := make([]control.BlobstorInfo, len(info.SubStorages))
for i := range info.SubStorages {
- res[i] = &control.BlobstorInfo{
+ res[i] = control.BlobstorInfo{
Path: info.SubStorages[i].Path,
Type: info.SubStorages[i].Type,
}
diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go
new file mode 100644
index 0000000000..39565ed50a
--- /dev/null
+++ b/pkg/services/control/server/list_shards_for_object.go
@@ -0,0 +1,65 @@
+package control
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ var obj oid.ID
+ err = obj.DecodeString(req.GetBody().GetObjectId())
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ var cnr cid.ID
+ err = cnr.DecodeString(req.GetBody().GetContainerId())
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ resp := new(control.ListShardsForObjectResponse)
+ body := new(control.ListShardsForObjectResponse_Body)
+ resp.SetBody(body)
+
+ var objAddr oid.Address
+ objAddr.SetContainer(cnr)
+ objAddr.SetObject(obj)
+ info, err := s.s.ListShardsForObject(ctx, objAddr)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ if len(info) == 0 {
+ return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject)
+ }
+
+ body.SetShard_ID(shardInfoToProto(info))
+
+ // Sign the response
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
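+// shardInfoToProto extracts raw shard IDs from the shard info list.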
+func shardInfoToProto(infos []shard.Info) [][]byte {
+ shardInfos := make([][]byte, 0, len(infos))
+ for _, info := range infos {
+ shardInfos = append(shardInfos, *info.ID)
+ }
+
+ return shardInfos
+}
diff --git a/pkg/services/control/server/policy_engine.go b/pkg/services/control/server/policy_engine.go
new file mode 100644
index 0000000000..ab8258e27e
--- /dev/null
+++ b/pkg/services/control/server/policy_engine.go
@@ -0,0 +1,250 @@
+package control
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
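+// apeTarget converts a control API chain target into a policy engine target.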
+func apeTarget(chainTarget *control.ChainTarget) (engine.Target, error) {
+ switch chainTarget.GetType() {
+ case control.ChainTarget_CONTAINER:
+ return engine.ContainerTarget(chainTarget.GetName()), nil
+ case control.ChainTarget_NAMESPACE:
+ return engine.NamespaceTarget(chainTarget.GetName()), nil
+ case control.ChainTarget_USER:
+ return engine.UserTarget(chainTarget.GetName()), nil
+ case control.ChainTarget_GROUP:
+ return engine.GroupTarget(chainTarget.GetName()), nil
+ default:
+ }
+ return engine.Target{}, status.Error(codes.InvalidArgument,
+ fmt.Errorf("target type is not supported: %s", chainTarget.GetType().String()).Error())
+}
+
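+// controlTarget converts a policy engine target back into a control API chain target.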
+func controlTarget(chainTarget *engine.Target) (control.ChainTarget, error) {
+ switch chainTarget.Type {
+ case engine.Container:
+ return control.ChainTarget{
+ Name: chainTarget.Name,
+ Type: control.ChainTarget_CONTAINER,
+ }, nil
+ case engine.Namespace:
+ // An empty name in the control API denotes the root namespace, so convert "root" back to "".
+ nm := chainTarget.Name
+ if nm == "root" {
+ nm = ""
+ }
+ return control.ChainTarget{
+ Name: nm,
+ Type: control.ChainTarget_NAMESPACE,
+ }, nil
+ case engine.User:
+ return control.ChainTarget{
+ Name: chainTarget.Name,
+ Type: control.ChainTarget_USER,
+ }, nil
+ case engine.Group:
+ return control.ChainTarget{
+ Name: chainTarget.Name,
+ Type: control.ChainTarget_GROUP,
+ }, nil
+ default:
+ }
+ return control.ChainTarget{}, status.Error(codes.InvalidArgument,
+ fmt.Errorf("target type is not supported: %c", chainTarget.Type).Error())
+}
+
+func (s *Server) AddChainLocalOverride(_ context.Context, req *control.AddChainLocalOverrideRequest) (*control.AddChainLocalOverrideResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ var chain apechain.Chain
+ if err := chain.DecodeBytes(req.GetBody().GetChain()); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+ for _, rule := range chain.Rules {
+ for _, name := range rule.Resources.Names {
+ if err := ape.ValidateResourceName(name); err != nil {
+ return nil, status.Error(codes.InvalidArgument, fmt.Errorf("invalid resource: %w", err).Error())
+ }
+ }
+ }
+
+ s.apeChainCounter.Add(1)
+ // TODO (aarifullin): such a chain id is not well-designed yet.
+ if len(chain.ID) == 0 {
+ chain.ID = apechain.ID(fmt.Sprintf("%s:%d", apechain.Ingress, s.apeChainCounter.Load()))
+ }
+
+ target, err := apeTarget(req.GetBody().GetTarget())
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err = s.localOverrideStorage.LocalStorage().AddOverride(apechain.Ingress, target, &chain); err != nil {
+ return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
+ }
+
+ resp := &control.AddChainLocalOverrideResponse{
+ Body: &control.AddChainLocalOverrideResponse_Body{
+ ChainId: []byte(chain.ID),
+ },
+ }
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) GetChainLocalOverride(_ context.Context, req *control.GetChainLocalOverrideRequest) (*control.GetChainLocalOverrideResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ target, err := apeTarget(req.GetBody().GetTarget())
+ if err != nil {
+ return nil, err
+ }
+ chain, err := s.localOverrideStorage.LocalStorage().GetOverride(apechain.Ingress, target, apechain.ID(req.GetBody().GetChainId()))
+ if err != nil {
+ return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
+ }
+
+ resp := &control.GetChainLocalOverrideResponse{
+ Body: &control.GetChainLocalOverrideResponse_Body{
+ Chain: chain.Bytes(),
+ },
+ }
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) ListChainLocalOverrides(_ context.Context, req *control.ListChainLocalOverridesRequest) (*control.ListChainLocalOverridesResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ target, err := apeTarget(req.GetBody().GetTarget())
+ if err != nil {
+ return nil, err
+ }
+
+ chains, err := s.localOverrideStorage.LocalStorage().ListOverrides(apechain.Ingress, target)
+ if err != nil {
+ return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
+ }
+ serializedChains := make([][]byte, 0, len(chains))
+ for _, chain := range chains {
+ serializedChains = append(serializedChains, chain.Bytes())
+ }
+
+ resp := &control.ListChainLocalOverridesResponse{
+ Body: &control.ListChainLocalOverridesResponse_Body{
+ Chains: serializedChains,
+ },
+ }
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) RemoveChainLocalOverride(_ context.Context, req *control.RemoveChainLocalOverrideRequest) (*control.RemoveChainLocalOverrideResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ target, err := apeTarget(req.GetBody().GetTarget())
+ if err != nil {
+ return nil, err
+ }
+
+ if err = s.localOverrideStorage.LocalStorage().RemoveOverride(apechain.Ingress, target, req.GetBody().GetChainId()); err != nil {
+ return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
+ }
+ resp := &control.RemoveChainLocalOverrideResponse{
+ Body: &control.RemoveChainLocalOverrideResponse_Body{},
+ }
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) RemoveChainLocalOverridesByTarget(_ context.Context, req *control.RemoveChainLocalOverridesByTargetRequest) (*control.RemoveChainLocalOverridesByTargetResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ target, err := apeTarget(req.GetBody().GetTarget())
+ if err != nil {
+ return nil, err
+ }
+
+ if err = s.localOverrideStorage.LocalStorage().RemoveOverridesByTarget(apechain.Ingress, target); err != nil {
+ return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
+ }
+ resp := &control.RemoveChainLocalOverridesByTargetResponse{
+ Body: &control.RemoveChainLocalOverridesByTargetResponse_Body{},
+ }
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) ListTargetsLocalOverrides(_ context.Context, req *control.ListTargetsLocalOverridesRequest) (*control.ListTargetsLocalOverridesResponse, error) {
+ if err := s.isValidRequest(req); err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ apeChainName := apechain.Name(req.GetBody().GetChainName())
+ apeTargets, err := s.localOverrideStorage.LocalStorage().ListOverrideDefinedTargets(apeChainName)
+ if err != nil {
+ return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
+ }
+ targets := make([]control.ChainTarget, 0, len(apeTargets))
+ for i := range apeTargets {
+ target, err := controlTarget(&apeTargets[i])
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ targets = append(targets, target)
+ }
+
+ resp := &control.ListTargetsLocalOverridesResponse{
+ Body: &control.ListTargetsLocalOverridesResponse_Body{
+ Targets: targets,
+ },
+ }
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
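+// getCodeByLocalStorageErr maps local override storage errors to gRPC status codes.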
+func getCodeByLocalStorageErr(err error) codes.Code {
+ if errors.Is(err, engine.ErrChainNotFound) || errors.Is(err, engine.ErrChainNameNotFound) ||
+ errors.Is(err, engine.ErrResourceNotFound) {
+ return codes.NotFound
+ }
+ return codes.Internal
+}
diff --git a/pkg/services/control/server/rebuild.go b/pkg/services/control/server/rebuild.go
new file mode 100644
index 0000000000..6ddfb8bf4c
--- /dev/null
+++ b/pkg/services/control/server/rebuild.go
@@ -0,0 +1,59 @@
+package control
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) StartShardRebuild(ctx context.Context, req *control.StartShardRebuildRequest) (*control.StartShardRebuildResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ if req.GetBody().GetConcurrencyLimit() == 0 || req.GetBody().GetConcurrencyLimit() > 10000 {
+ return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("concurrency limit must be in range (0; 10 000], current value %d", req.GetBody().GetConcurrencyLimit()))
+ }
+
+ if req.GetBody().GetTargetFillPercent() == 0 || req.GetBody().GetTargetFillPercent() > 100 {
+ return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("fill percent must be in range (0; 100], current value %d", req.GetBody().GetTargetFillPercent()))
+ }
+
+ prm := engine.RebuildPrm{
+ ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
+ ConcurrencyLimit: req.GetBody().GetConcurrencyLimit(),
+ TargetFillPercent: req.GetBody().GetTargetFillPercent(),
+ }
+
+ res, err := s.s.Rebuild(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.StartShardRebuildResponse{Body: &control.StartShardRebuildResponse_Body{}}
+ for _, r := range res.ShardResults {
+ if r.Success {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Success: true,
+ })
+ } else {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Error: r.ErrorMsg,
+ })
+ }
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/pkg/services/control/server/restore.go b/pkg/services/control/server/restore.go
deleted file mode 100644
index 0e6367951a..0000000000
--- a/pkg/services/control/server/restore.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) RestoreShard(_ context.Context, req *control.RestoreShardRequest) (*control.RestoreShardResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID())
-
- var prm shard.RestorePrm
- prm.WithPath(req.GetBody().GetFilepath())
- prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
-
- err = s.s.RestoreShard(shardID, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := new(control.RestoreShardResponse)
- resp.SetBody(new(control.RestoreShardResponse_Body))
-
- err = SignMessage(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go
new file mode 100644
index 0000000000..6799bdcac5
--- /dev/null
+++ b/pkg/services/control/server/seal_writecache.go
@@ -0,0 +1,52 @@
+package control
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCacheRequest) (*control.SealWriteCacheResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ prm := engine.SealWriteCachePrm{
+ ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ Async: req.GetBody().GetAsync(),
+ RestoreMode: req.GetBody().GetRestoreMode(),
+ Shrink: req.GetBody().GetShrink(),
+ }
+
+ res, err := s.s.SealWriteCache(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.SealWriteCacheResponse{Body: &control.SealWriteCacheResponse_Body{}}
+ for _, r := range res.ShardResults {
+ if r.Success {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Success: true,
+ })
+ } else {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Error: r.ErrorMsg,
+ })
+ }
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index a0ad44e234..59d701bc63 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -1,31 +1,39 @@
package control
import (
+ "context"
"crypto/ecdsa"
+ "sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
)
// Server is an entity that serves
// Control service on storage node.
type Server struct {
*cfg
+
+ // TODO (aarifullin): this counter is used to assign ids to rule chains
+ // added as local overrides and will be removed as soon as the in-memory
+ // implementation is replaced.
+ apeChainCounter atomic.Uint32
}
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current status of the node in FrostFS network map.
+ // NetmapStatus must calculate and return the current status of the node in the FrostFS network map.
//
// If status can not be calculated for any reason,
// control.netmapStatus_STATUS_UNDEFINED should be returned.
NetmapStatus() control.NetmapStatus
- // Must calculate and return current health status of the node application.
+ // HealthStatus must calculate and return the current health status of the node application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
@@ -38,11 +46,21 @@ type NodeState interface {
//
// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
// in the network settings, the node additionally starts local maintenance.
- SetNetmapStatus(st control.NetmapStatus) error
+ SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
// but starts local maintenance regardless of the network settings.
- ForceMaintenance() error
+ ForceMaintenance(ctx context.Context) error
+
+ GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
+}
+
+// LocalOverrideStorageDecorator provides methods to decorate the methods of
+// the LocalOverrideEngine interface.
+type LocalOverrideStorageDecorator interface {
+ // LocalStorage may be decorated with synchronization primitives when the local
+ // override storage state must stay consistent for the chain router.
+ LocalStorage() policyengine.LocalOverrideStorage
}
// Option of the Server's constructor.
@@ -59,6 +77,8 @@ type cfg struct {
cnrSrc container.Source
+ localOverrideStorage LocalOverrideStorageDecorator
+
replicator *replicator.Replicator
nodeState NodeState
@@ -151,3 +171,11 @@ func WithTreeService(s TreeService) Option {
c.treeService = s
}
}
+
+// WithLocalOverrideStorage returns the option to set access policy engine
+// chain override storage.
+func WithLocalOverrideStorage(localOverrideStorage LocalOverrideStorageDecorator) Option {
+ return func(c *cfg) {
+ c.localOverrideStorage = localOverrideStorage
+ }
+}
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
index ba7e682c62..529041dcad 100644
--- a/pkg/services/control/server/set_netmap_status.go
+++ b/pkg/services/control/server/set_netmap_status.go
@@ -4,6 +4,7 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -28,9 +29,9 @@ func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStat
"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
}
- err = s.nodeState.ForceMaintenance()
+ err = s.nodeState.ForceMaintenance(ctx)
} else {
- err = s.nodeState.SetNetmapStatus(st)
+ err = s.nodeState.SetNetmapStatus(ctx, st)
}
if err != nil {
@@ -44,7 +45,7 @@ func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStat
resp.SetBody(body)
// sign the response
- if err := SignMessage(s.key, resp); err != nil {
+ if err := ctrlmessage.Sign(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go
index 274e2f65d3..4f87962636 100644
--- a/pkg/services/control/server/set_shard_mode.go
+++ b/pkg/services/control/server/set_shard_mode.go
@@ -6,11 +6,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
-func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
+func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
// verify request
err := s.isValidRequest(req)
if err != nil {
@@ -36,8 +37,8 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques
return nil, status.Error(codes.Internal, fmt.Sprintf("unknown shard mode: %s", requestedMode))
}
- for _, shardID := range s.getShardIDList(req.Body.GetShard_ID()) {
- err = s.s.SetShardMode(shardID, m, req.Body.GetResetErrorCounter())
+ for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
+ err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -50,7 +51,7 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques
resp.SetBody(body)
// sign the response
- err = SignMessage(s.key, resp)
+ err = ctrlmessage.Sign(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/sign.go b/pkg/services/control/server/sign.go
index 726cdf3417..0e8e24b6e9 100644
--- a/pkg/services/control/server/sign.go
+++ b/pkg/services/control/server/sign.go
@@ -2,29 +2,20 @@ package control
import (
"bytes"
- "crypto/ecdsa"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
- frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
-// SignedMessage is an interface of Control service message.
-type SignedMessage interface {
- ReadSignedData([]byte) ([]byte, error)
- GetSignature() *control.Signature
- SetSignature(*control.Signature)
-}
-
var errDisallowedKey = errors.New("key is not in the allowed list")
-func (s *Server) isValidRequest(req SignedMessage) error {
+func (s *Server) isValidRequest(req ctrlmessage.SignedMessage) error {
sign := req.GetSignature()
if sign == nil {
- // TODO(@cthulhu-rider): #1387 use "const" error
+ // TODO(@cthulhu-rider): #468 use "const" error
return errors.New("missing signature")
}
@@ -50,7 +41,7 @@ func (s *Server) isValidRequest(req SignedMessage) error {
return fmt.Errorf("marshal request body: %w", err)
}
- // TODO(@cthulhu-rider): #1387 use Signature message from NeoFS API to avoid conversion
+ // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion
var sigV2 refs.Signature
sigV2.SetKey(sign.GetKey())
sigV2.SetSign(sign.GetSign())
@@ -62,36 +53,9 @@ func (s *Server) isValidRequest(req SignedMessage) error {
}
if !sig.Verify(binBody) {
- // TODO(@cthulhu-rider): #1387 use "const" error
+ // TODO(@cthulhu-rider): #468 use "const" error
return errors.New("invalid signature")
}
return nil
}
-
-// SignMessage signs Control service message with private key.
-func SignMessage(key *ecdsa.PrivateKey, msg SignedMessage) error {
- binBody, err := msg.ReadSignedData(nil)
- if err != nil {
- return fmt.Errorf("marshal request body: %w", err)
- }
-
- var sig frostfscrypto.Signature
-
- err = sig.Calculate(frostfsecdsa.Signer(*key), binBody)
- if err != nil {
- return fmt.Errorf("calculate signature: %w", err)
- }
-
- // TODO(@cthulhu-rider): #1387 use Signature message from NeoFS API to avoid conversion
- var sigV2 refs.Signature
- sig.WriteToV2(&sigV2)
-
- var sigControl control.Signature
- sigControl.SetKey(sigV2.GetKey())
- sigControl.SetSign(sigV2.GetSign())
-
- msg.SetSignature(&sigControl)
-
- return nil
-}
diff --git a/pkg/services/control/server/syncronize_tree.go b/pkg/services/control/server/syncronize_tree.go
index dce3e8831a..b2a966b2ca 100644
--- a/pkg/services/control/server/syncronize_tree.go
+++ b/pkg/services/control/server/syncronize_tree.go
@@ -4,14 +4,18 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TreeService represents a tree service instance.
type TreeService interface {
- Synchronize(ctx context.Context, cnr cid.ID, treeID string) error
+ SynchronizeTree(ctx context.Context, cnr cid.ID, treeID string) error
+ ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req *tree.ApplyRequest) error
}
func (s *Server) SynchronizeTree(ctx context.Context, req *control.SynchronizeTreeRequest) (*control.SynchronizeTreeResponse, error) {
@@ -31,7 +35,7 @@ func (s *Server) SynchronizeTree(ctx context.Context, req *control.SynchronizeTr
return nil, status.Error(codes.Internal, err.Error())
}
- err = s.treeService.Synchronize(ctx, cnr, b.GetTreeId())
+ err = s.treeService.SynchronizeTree(ctx, cnr, b.GetTreeId())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -39,7 +43,7 @@ func (s *Server) SynchronizeTree(ctx context.Context, req *control.SynchronizeTr
resp := new(control.SynchronizeTreeResponse)
resp.SetBody(new(control.SynchronizeTreeResponse_Body))
- err = SignMessage(s.key, resp)
+ err = ctrlmessage.Sign(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
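
`TreeService` is widened here: `Synchronize` is renamed to `SynchronizeTree`, and `ReplicateTreeOp` is added so the control server can push a single tree operation to a specific container node. A hypothetical caller against the new interface (the wrapper function itself is illustrative, not part of the source):

```go
package control

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// resyncAndReplicate is a hypothetical helper showing both methods of the
// widened TreeService interface in use.
func resyncAndReplicate(ctx context.Context, svc TreeService, cnr cid.ID, treeID string, n netmapSDK.NodeInfo, op *tree.ApplyRequest) error {
	// Pull missing operations for the tree from other container nodes.
	if err := svc.SynchronizeTree(ctx, cnr, treeID); err != nil {
		return fmt.Errorf("synchronize tree: %w", err)
	}
	// Push a single operation to the given node of the container.
	return svc.ReplicateTreeOp(ctx, n, op)
}
```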
diff --git a/pkg/services/control/service.go b/pkg/services/control/service.go
deleted file mode 100644
index dd349dc57a..0000000000
--- a/pkg/services/control/service.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package control
-
-// SetBody sets health check request body.
-func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetNetmapStatus sets status of the storage node in FrostFS network map.
-func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) {
- if x != nil {
- x.NetmapStatus = v
- }
-}
-
-// SetHealthStatus sets health status of the storage node application.
-func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
- if x != nil {
- x.HealthStatus = v
- }
-}
-
-// SetBody sets health check response body.
-func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetStatus sets new storage node status in FrostFS network map.
-func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) {
- if x != nil {
- x.Status = v
- }
-}
-
-// SetForceMaintenance sets force_maintenance flag in the message.
-func (x *SetNetmapStatusRequest_Body) SetForceMaintenance() {
- x.ForceMaintenance = true
-}
-
-// SetBody sets body of the set netmap status request.
-func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets body of the set netmap status response.
-func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetAddressList sets list of objects to be removed in FrostFS API binary format.
-func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) {
- if x != nil {
- x.AddressList = v
- }
-}
-
-// SetBody sets body of the set "Drop objects" request.
-func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets set body of the "Drop objects" response.
-func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets list shards request body.
-func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetShards sets shards of the storage node.
-func (x *ListShardsResponse_Body) SetShards(v []*ShardInfo) {
- if x != nil {
- x.Shards = v
- }
-}
-
-// SetBody sets list shards response body.
-func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetShardIDList sets the list of shard IDs whose mode is requested to be set.
-func (x *SetShardModeRequest_Body) SetShardIDList(v [][]byte) {
-	if x != nil {
- x.Shard_ID = v
- }
-}
-
-// SetMode sets mode of the shard.
-func (x *SetShardModeRequest_Body) SetMode(v ShardMode) {
- x.Mode = v
-}
-
-// ClearErrorCounter sets flag signifying whether error counter for shard should be cleared.
-func (x *SetShardModeRequest_Body) ClearErrorCounter(reset bool) {
- x.ResetErrorCounter = reset
-}
-
-// SetBody sets request body.
-func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets body of the set shard mode response.
-func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetShardID sets shard ID for the dump shard request.
-func (x *DumpShardRequest_Body) SetShardID(id []byte) {
- x.Shard_ID = id
-}
-
-// SetFilepath sets filepath for the dump shard request.
-func (x *DumpShardRequest_Body) SetFilepath(p string) {
- x.Filepath = p
-}
-
-// SetIgnoreErrors sets ignore errors flag for the dump shard request.
-func (x *DumpShardRequest_Body) SetIgnoreErrors(ignore bool) {
- x.IgnoreErrors = ignore
-}
-
-// SetBody sets request body.
-func (x *DumpShardRequest) SetBody(v *DumpShardRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets response body.
-func (x *DumpShardResponse) SetBody(v *DumpShardResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetShardID sets shard ID for the restore shard request.
-func (x *RestoreShardRequest_Body) SetShardID(id []byte) {
- x.Shard_ID = id
-}
-
-// SetFilepath sets filepath for the restore shard request.
-func (x *RestoreShardRequest_Body) SetFilepath(p string) {
- x.Filepath = p
-}
-
-// SetIgnoreErrors sets ignore errors flag for the restore shard request.
-func (x *RestoreShardRequest_Body) SetIgnoreErrors(ignore bool) {
- x.IgnoreErrors = ignore
-}
-
-// SetBody sets request body.
-func (x *RestoreShardRequest) SetBody(v *RestoreShardRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets response body.
-func (x *RestoreShardResponse) SetBody(v *RestoreShardResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets synchronize tree request body.
-func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets synchronize tree response body.
-func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
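
Every setter deleted from service.go follows the same nil-guarded shape, mirroring the nil-safe getters that protoc-gen-go emits; presumably these setters are now code-generated instead of being committed by hand. The pattern, for reference:

```go
// Shape of the removed hand-written setters: a nil-receiver guard makes the
// call a no-op on a nil message, matching the behavior of generated getters.
// Sketch only; the replacements are presumably produced by code generation.
func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
	if x != nil {
		x.Body = v
	}
}
```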
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
deleted file mode 100644
index 78e7b0598d..0000000000
--- a/pkg/services/control/service.pb.go
+++ /dev/null
@@ -1,3092 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.21.12
-// source: pkg/services/control/service.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Health check request.
-type HealthCheckRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check request message.
- Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckRequest) Reset() {
- *x = HealthCheckRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest) ProtoMessage() {}
-
-func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check response.
-type HealthCheckResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check response message.
- Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckResponse) Reset() {
- *x = HealthCheckResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse) ProtoMessage() {}
-
-func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Set netmap status request.
-type SetNetmapStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set netmap status request message.
- Body *SetNetmapStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetNetmapStatusRequest) Reset() {
- *x = SetNetmapStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusRequest) ProtoMessage() {}
-
-func (x *SetNetmapStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusRequest.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetNetmapStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Set netmap status response.
-type SetNetmapStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set netmap status response message.
- Body *SetNetmapStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetNetmapStatusResponse) Reset() {
- *x = SetNetmapStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusResponse) ProtoMessage() {}
-
-func (x *SetNetmapStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusResponse.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetNetmapStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Request to drop the objects.
-type DropObjectsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the request message.
- Body *DropObjectsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DropObjectsRequest) Reset() {
- *x = DropObjectsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsRequest) ProtoMessage() {}
-
-func (x *DropObjectsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsRequest.ProtoReflect.Descriptor instead.
-func (*DropObjectsRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DropObjectsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Response to request to drop the objects.
-type DropObjectsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the response message.
- Body *DropObjectsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DropObjectsResponse) Reset() {
- *x = DropObjectsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsResponse) ProtoMessage() {}
-
-func (x *DropObjectsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsResponse.ProtoReflect.Descriptor instead.
-func (*DropObjectsResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DropObjectsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Request to list all shards of the node.
-type ListShardsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the request message.
- Body *ListShardsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListShardsRequest) Reset() {
- *x = ListShardsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsRequest) ProtoMessage() {}
-
-func (x *ListShardsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsRequest.ProtoReflect.Descriptor instead.
-func (*ListShardsRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListShardsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ListShards response.
-type ListShardsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the response message.
- Body *ListShardsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListShardsResponse) Reset() {
- *x = ListShardsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsResponse) ProtoMessage() {}
-
-func (x *ListShardsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsResponse.ProtoReflect.Descriptor instead.
-func (*ListShardsResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListShardsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Request to set mode of the shard.
-type SetShardModeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set shard mode request message.
- Body *SetShardModeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetShardModeRequest) Reset() {
- *x = SetShardModeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeRequest) ProtoMessage() {}
-
-func (x *SetShardModeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeRequest.ProtoReflect.Descriptor instead.
-func (*SetShardModeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetShardModeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// SetShardMode response.
-type SetShardModeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set shard mode response message.
- Body *SetShardModeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetShardModeResponse) Reset() {
- *x = SetShardModeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeResponse) ProtoMessage() {}
-
-func (x *SetShardModeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeResponse.ProtoReflect.Descriptor instead.
-func (*SetShardModeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetShardModeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// DumpShard request.
-type DumpShardRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of dump shard request message.
- Body *DumpShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DumpShardRequest) Reset() {
- *x = DumpShardRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardRequest) ProtoMessage() {}
-
-func (x *DumpShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardRequest.ProtoReflect.Descriptor instead.
-func (*DumpShardRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *DumpShardRequest) GetBody() *DumpShardRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DumpShardRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// DumpShard response.
-type DumpShardResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of dump shard response message.
- Body *DumpShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DumpShardResponse) Reset() {
- *x = DumpShardResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardResponse) ProtoMessage() {}
-
-func (x *DumpShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardResponse.ProtoReflect.Descriptor instead.
-func (*DumpShardResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *DumpShardResponse) GetBody() *DumpShardResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DumpShardResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// RestoreShard request.
-type RestoreShardRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of restore shard request message.
- Body *RestoreShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RestoreShardRequest) Reset() {
- *x = RestoreShardRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardRequest) ProtoMessage() {}
-
-func (x *RestoreShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardRequest.ProtoReflect.Descriptor instead.
-func (*RestoreShardRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *RestoreShardRequest) GetBody() *RestoreShardRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RestoreShardRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// RestoreShard response.
-type RestoreShardResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of restore shard response message.
- Body *RestoreShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RestoreShardResponse) Reset() {
- *x = RestoreShardResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardResponse) ProtoMessage() {}
-
-func (x *RestoreShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardResponse.ProtoReflect.Descriptor instead.
-func (*RestoreShardResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *RestoreShardResponse) GetBody() *RestoreShardResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RestoreShardResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// SynchronizeTree request.
-type SynchronizeTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
-	// Body of synchronize tree request message.
- Body *SynchronizeTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SynchronizeTreeRequest) Reset() {
- *x = SynchronizeTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeRequest) ProtoMessage() {}
-
-func (x *SynchronizeTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeRequest.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SynchronizeTreeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// SynchronizeTree response.
-type SynchronizeTreeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
-	// Body of synchronize tree response message.
- Body *SynchronizeTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SynchronizeTreeResponse) Reset() {
- *x = SynchronizeTreeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeResponse) ProtoMessage() {}
-
-func (x *SynchronizeTreeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeResponse.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SynchronizeTreeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// EvacuateShard request.
-type EvacuateShardRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *EvacuateShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *EvacuateShardRequest) Reset() {
- *x = EvacuateShardRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardRequest) ProtoMessage() {}
-
-func (x *EvacuateShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardRequest.ProtoReflect.Descriptor instead.
-func (*EvacuateShardRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *EvacuateShardRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// EvacuateShard response.
-type EvacuateShardResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *EvacuateShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *EvacuateShardResponse) Reset() {
- *x = EvacuateShardResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardResponse) ProtoMessage() {}
-
-func (x *EvacuateShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardResponse.ProtoReflect.Descriptor instead.
-func (*EvacuateShardResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *EvacuateShardResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// FlushCache request.
-type FlushCacheRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *FlushCacheRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *FlushCacheRequest) Reset() {
- *x = FlushCacheRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheRequest) ProtoMessage() {}
-
-func (x *FlushCacheRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheRequest.ProtoReflect.Descriptor instead.
-func (*FlushCacheRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *FlushCacheRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// FlushCache response.
-type FlushCacheResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *FlushCacheResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *FlushCacheResponse) Reset() {
- *x = FlushCacheResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheResponse) ProtoMessage() {}
-
-func (x *FlushCacheResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheResponse.ProtoReflect.Descriptor instead.
-func (*FlushCacheResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *FlushCacheResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check request body.
-type HealthCheckRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthCheckRequest_Body) Reset() {
- *x = HealthCheckRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest_Body) ProtoMessage() {}
-
-func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Health check response body.
-type HealthCheckResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Status of the storage node in FrostFS network map.
- NetmapStatus NetmapStatus `protobuf:"varint,1,opt,name=netmap_status,json=netmapStatus,proto3,enum=control.NetmapStatus" json:"netmap_status,omitempty"`
- // Health status of storage node application.
- HealthStatus HealthStatus `protobuf:"varint,2,opt,name=health_status,json=healthStatus,proto3,enum=control.HealthStatus" json:"health_status,omitempty"`
-}
-
-func (x *HealthCheckResponse_Body) Reset() {
- *x = HealthCheckResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse_Body) ProtoMessage() {}
-
-func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus {
- if x != nil {
- return x.NetmapStatus
- }
- return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
- if x != nil {
- return x.HealthStatus
- }
- return HealthStatus_HEALTH_STATUS_UNDEFINED
-}
-
-// Set netmap status request body.
-type SetNetmapStatusRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // New storage node status in FrostFS network map.
- // If status is MAINTENANCE, the node checks whether maintenance is
- // allowed in the network settings. In case of prohibition, the request
- // is denied. Otherwise, node switches to local maintenance state. To
- // force local maintenance, use `force_maintenance` flag.
- Status NetmapStatus `protobuf:"varint,1,opt,name=status,proto3,enum=control.NetmapStatus" json:"status,omitempty"`
- // MAINTENANCE status validation skip flag. If set, node starts local
- // maintenance regardless of network settings. The flag MUST NOT be
- // set for any other status.
- ForceMaintenance bool `protobuf:"varint,2,opt,name=force_maintenance,json=forceMaintenance,proto3" json:"force_maintenance,omitempty"`
-}
-
-func (x *SetNetmapStatusRequest_Body) Reset() {
- *x = SetNetmapStatusRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusRequest_Body) ProtoMessage() {}
-
-func (x *SetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2, 0}
-}
-
-func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus {
- if x != nil {
- return x.Status
- }
- return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool {
- if x != nil {
- return x.ForceMaintenance
- }
- return false
-}
-
-// Set netmap status response body.
-type SetNetmapStatusResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *SetNetmapStatusResponse_Body) Reset() {
- *x = SetNetmapStatusResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusResponse_Body) ProtoMessage() {}
-
-func (x *SetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusResponse_Body.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3, 0}
-}
-
-// Request body structure.
-type DropObjectsRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
-	// List of object addresses to be removed
-	// in FrostFS API binary format.
- AddressList [][]byte `protobuf:"bytes,1,rep,name=address_list,json=addressList,proto3" json:"address_list,omitempty"`
-}
-
-func (x *DropObjectsRequest_Body) Reset() {
- *x = DropObjectsRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsRequest_Body) ProtoMessage() {}
-
-func (x *DropObjectsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsRequest_Body.ProtoReflect.Descriptor instead.
-func (*DropObjectsRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4, 0}
-}
-
-func (x *DropObjectsRequest_Body) GetAddressList() [][]byte {
- if x != nil {
- return x.AddressList
- }
- return nil
-}
-
-// Response body structure.
-type DropObjectsResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *DropObjectsResponse_Body) Reset() {
- *x = DropObjectsResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsResponse_Body) ProtoMessage() {}
-
-func (x *DropObjectsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsResponse_Body.ProtoReflect.Descriptor instead.
-func (*DropObjectsResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5, 0}
-}
-
-// Request body structure.
-type ListShardsRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ListShardsRequest_Body) Reset() {
- *x = ListShardsRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsRequest_Body) ProtoMessage() {}
-
-func (x *ListShardsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsRequest_Body.ProtoReflect.Descriptor instead.
-func (*ListShardsRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6, 0}
-}
-
-// Response body structure.
-type ListShardsResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // List of the node's shards.
- Shards []*ShardInfo `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"`
-}
-
-func (x *ListShardsResponse_Body) Reset() {
- *x = ListShardsResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsResponse_Body) ProtoMessage() {}
-
-func (x *ListShardsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsResponse_Body.ProtoReflect.Descriptor instead.
-func (*ListShardsResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7, 0}
-}
-
-func (x *ListShardsResponse_Body) GetShards() []*ShardInfo {
- if x != nil {
- return x.Shards
- }
- return nil
-}
-
-// Request body structure.
-type SetShardModeRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
-	// List of shard IDs.
-	Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-	// Mode that is requested to be set.
- Mode ShardMode `protobuf:"varint,2,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
- // Flag signifying whether error counter should be set to 0.
- ResetErrorCounter bool `protobuf:"varint,3,opt,name=resetErrorCounter,proto3" json:"resetErrorCounter,omitempty"`
-}
-
-func (x *SetShardModeRequest_Body) Reset() {
- *x = SetShardModeRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeRequest_Body) ProtoMessage() {}
-
-func (x *SetShardModeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeRequest_Body.ProtoReflect.Descriptor instead.
-func (*SetShardModeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8, 0}
-}
-
-func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *SetShardModeRequest_Body) GetMode() ShardMode {
- if x != nil {
- return x.Mode
- }
- return ShardMode_SHARD_MODE_UNDEFINED
-}
-
-func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool {
- if x != nil {
- return x.ResetErrorCounter
- }
- return false
-}
-
-// Response body structure.
-type SetShardModeResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *SetShardModeResponse_Body) Reset() {
- *x = SetShardModeResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeResponse_Body) ProtoMessage() {}
-
-func (x *SetShardModeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeResponse_Body.ProtoReflect.Descriptor instead.
-func (*SetShardModeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9, 0}
-}
-
-// Request body structure.
-type DumpShardRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Path to the output.
- Filepath string `protobuf:"bytes,2,opt,name=filepath,proto3" json:"filepath,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,3,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-}
-
-func (x *DumpShardRequest_Body) Reset() {
- *x = DumpShardRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardRequest_Body) ProtoMessage() {}
-
-func (x *DumpShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardRequest_Body.ProtoReflect.Descriptor instead.
-func (*DumpShardRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10, 0}
-}
-
-func (x *DumpShardRequest_Body) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *DumpShardRequest_Body) GetFilepath() string {
- if x != nil {
- return x.Filepath
- }
- return ""
-}
-
-func (x *DumpShardRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-// Response body structure.
-type DumpShardResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *DumpShardResponse_Body) Reset() {
- *x = DumpShardResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardResponse_Body) ProtoMessage() {}
-
-func (x *DumpShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardResponse_Body.ProtoReflect.Descriptor instead.
-func (*DumpShardResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11, 0}
-}
-
-// Request body structure.
-type RestoreShardRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Path to the output.
- Filepath string `protobuf:"bytes,2,opt,name=filepath,proto3" json:"filepath,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,3,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-}
-
-func (x *RestoreShardRequest_Body) Reset() {
- *x = RestoreShardRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardRequest_Body) ProtoMessage() {}
-
-func (x *RestoreShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardRequest_Body.ProtoReflect.Descriptor instead.
-func (*RestoreShardRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12, 0}
-}
-
-func (x *RestoreShardRequest_Body) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *RestoreShardRequest_Body) GetFilepath() string {
- if x != nil {
- return x.Filepath
- }
- return ""
-}
-
-func (x *RestoreShardRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-// Response body structure.
-type RestoreShardResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *RestoreShardResponse_Body) Reset() {
- *x = RestoreShardResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardResponse_Body) ProtoMessage() {}
-
-func (x *RestoreShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardResponse_Body.ProtoReflect.Descriptor instead.
-func (*RestoreShardResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13, 0}
-}
-
-// Request body structure.
-type SynchronizeTreeRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Starting height for the synchronization. Can be omitted.
- Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
-}
-
-func (x *SynchronizeTreeRequest_Body) Reset() {
- *x = SynchronizeTreeRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeRequest_Body) ProtoMessage() {}
-
-func (x *SynchronizeTreeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeRequest_Body.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14, 0}
-}
-
-func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *SynchronizeTreeRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 {
- if x != nil {
- return x.Height
- }
- return 0
-}
-
-// Response body structure.
-type SynchronizeTreeResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *SynchronizeTreeResponse_Body) Reset() {
- *x = SynchronizeTreeResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeResponse_Body) ProtoMessage() {}
-
-func (x *SynchronizeTreeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeResponse_Body.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15, 0}
-}
-
-// Request body structure.
-type EvacuateShardRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-}
-
-func (x *EvacuateShardRequest_Body) Reset() {
- *x = EvacuateShardRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardRequest_Body) ProtoMessage() {}
-
-func (x *EvacuateShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardRequest_Body.ProtoReflect.Descriptor instead.
-func (*EvacuateShardRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16, 0}
-}
-
-func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-// Response body structure.
-type EvacuateShardResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
-}
-
-func (x *EvacuateShardResponse_Body) Reset() {
- *x = EvacuateShardResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardResponse_Body) ProtoMessage() {}
-
-func (x *EvacuateShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardResponse_Body.ProtoReflect.Descriptor instead.
-func (*EvacuateShardResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17, 0}
-}
-
-func (x *EvacuateShardResponse_Body) GetCount() uint32 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-// Request body structure.
-type FlushCacheRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-}
-
-func (x *FlushCacheRequest_Body) Reset() {
- *x = FlushCacheRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheRequest_Body) ProtoMessage() {}
-
-func (x *FlushCacheRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheRequest_Body.ProtoReflect.Descriptor instead.
-func (*FlushCacheRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18, 0}
-}
-
-func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-// Response body structure.
-type FlushCacheResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *FlushCacheResponse_Body) Reset() {
- *x = FlushCacheResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheResponse_Body) ProtoMessage() {}
-
-func (x *FlushCacheResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheResponse_Body.ProtoReflect.Descriptor instead.
-func (*FlushCacheResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19, 0}
-}
-
-var File_pkg_services_control_service_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_service_proto_rawDesc = []byte{
- 0x0a, 0x22, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x20, 0x70,
- 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
- 0x84, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xfe, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x3a, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6e,
- 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x68,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x74, 0x4e,
- 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65,
- 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x6d,
- 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x10, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e,
- 0x63, 0x65, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x22, 0xa7, 0x01, 0x0a, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x29, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x64,
- 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
- 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x86, 0x01,
- 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72,
- 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x82, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x12,
- 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x32, 0x0a, 0x04, 0x42, 0x6f,
- 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0xf7,
- 0x01, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x77, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x49, 0x44, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x72, 0x65,
- 0x73, 0x65, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x65, 0x73, 0x65, 0x74, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x53, 0x65, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x22, 0xdc, 0x01, 0x0a, 0x10, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49,
- 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a,
- 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x11, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe2, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x74,
- 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x66,
- 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66,
- 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72,
- 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
- 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x88, 0x01, 0x0a,
- 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65,
- 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe0, 0x01, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63,
- 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
- 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x5a,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
- 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65,
- 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53,
- 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x14,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x46,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49,
- 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65,
- 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x63, 0x75,
- 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x11, 0x46, 0x6c,
- 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63,
- 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19,
- 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
- 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x46, 0x6c,
- 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61,
- 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x32, 0x8c, 0x06, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a,
- 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65,
- 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e,
- 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f,
- 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a,
- 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x42, 0x0a, 0x09, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x19,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65,
- 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a,
- 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73,
- 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75,
- 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
- 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69,
- 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62,
- 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b,
- 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_service_proto_rawDescOnce sync.Once
- file_pkg_services_control_service_proto_rawDescData = file_pkg_services_control_service_proto_rawDesc
-)
-
-func file_pkg_services_control_service_proto_rawDescGZIP() []byte {
- file_pkg_services_control_service_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_service_proto_rawDescData)
- })
- return file_pkg_services_control_service_proto_rawDescData
-}
-
-var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 40)
-var file_pkg_services_control_service_proto_goTypes = []interface{}{
- (*HealthCheckRequest)(nil), // 0: control.HealthCheckRequest
- (*HealthCheckResponse)(nil), // 1: control.HealthCheckResponse
- (*SetNetmapStatusRequest)(nil), // 2: control.SetNetmapStatusRequest
- (*SetNetmapStatusResponse)(nil), // 3: control.SetNetmapStatusResponse
- (*DropObjectsRequest)(nil), // 4: control.DropObjectsRequest
- (*DropObjectsResponse)(nil), // 5: control.DropObjectsResponse
- (*ListShardsRequest)(nil), // 6: control.ListShardsRequest
- (*ListShardsResponse)(nil), // 7: control.ListShardsResponse
- (*SetShardModeRequest)(nil), // 8: control.SetShardModeRequest
- (*SetShardModeResponse)(nil), // 9: control.SetShardModeResponse
- (*DumpShardRequest)(nil), // 10: control.DumpShardRequest
- (*DumpShardResponse)(nil), // 11: control.DumpShardResponse
- (*RestoreShardRequest)(nil), // 12: control.RestoreShardRequest
- (*RestoreShardResponse)(nil), // 13: control.RestoreShardResponse
- (*SynchronizeTreeRequest)(nil), // 14: control.SynchronizeTreeRequest
- (*SynchronizeTreeResponse)(nil), // 15: control.SynchronizeTreeResponse
- (*EvacuateShardRequest)(nil), // 16: control.EvacuateShardRequest
- (*EvacuateShardResponse)(nil), // 17: control.EvacuateShardResponse
- (*FlushCacheRequest)(nil), // 18: control.FlushCacheRequest
- (*FlushCacheResponse)(nil), // 19: control.FlushCacheResponse
- (*HealthCheckRequest_Body)(nil), // 20: control.HealthCheckRequest.Body
- (*HealthCheckResponse_Body)(nil), // 21: control.HealthCheckResponse.Body
- (*SetNetmapStatusRequest_Body)(nil), // 22: control.SetNetmapStatusRequest.Body
- (*SetNetmapStatusResponse_Body)(nil), // 23: control.SetNetmapStatusResponse.Body
- (*DropObjectsRequest_Body)(nil), // 24: control.DropObjectsRequest.Body
- (*DropObjectsResponse_Body)(nil), // 25: control.DropObjectsResponse.Body
- (*ListShardsRequest_Body)(nil), // 26: control.ListShardsRequest.Body
- (*ListShardsResponse_Body)(nil), // 27: control.ListShardsResponse.Body
- (*SetShardModeRequest_Body)(nil), // 28: control.SetShardModeRequest.Body
- (*SetShardModeResponse_Body)(nil), // 29: control.SetShardModeResponse.Body
- (*DumpShardRequest_Body)(nil), // 30: control.DumpShardRequest.Body
- (*DumpShardResponse_Body)(nil), // 31: control.DumpShardResponse.Body
- (*RestoreShardRequest_Body)(nil), // 32: control.RestoreShardRequest.Body
- (*RestoreShardResponse_Body)(nil), // 33: control.RestoreShardResponse.Body
- (*SynchronizeTreeRequest_Body)(nil), // 34: control.SynchronizeTreeRequest.Body
- (*SynchronizeTreeResponse_Body)(nil), // 35: control.SynchronizeTreeResponse.Body
- (*EvacuateShardRequest_Body)(nil), // 36: control.EvacuateShardRequest.Body
- (*EvacuateShardResponse_Body)(nil), // 37: control.EvacuateShardResponse.Body
- (*FlushCacheRequest_Body)(nil), // 38: control.FlushCacheRequest.Body
- (*FlushCacheResponse_Body)(nil), // 39: control.FlushCacheResponse.Body
- (*Signature)(nil), // 40: control.Signature
- (NetmapStatus)(0), // 41: control.NetmapStatus
- (HealthStatus)(0), // 42: control.HealthStatus
- (*ShardInfo)(nil), // 43: control.ShardInfo
- (ShardMode)(0), // 44: control.ShardMode
-}
-var file_pkg_services_control_service_proto_depIdxs = []int32{
- 20, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
- 40, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
- 21, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
- 40, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
- 22, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
- 40, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
- 23, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
- 40, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
- 24, // 8: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
- 40, // 9: control.DropObjectsRequest.signature:type_name -> control.Signature
- 25, // 10: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
- 40, // 11: control.DropObjectsResponse.signature:type_name -> control.Signature
- 26, // 12: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
- 40, // 13: control.ListShardsRequest.signature:type_name -> control.Signature
- 27, // 14: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
- 40, // 15: control.ListShardsResponse.signature:type_name -> control.Signature
- 28, // 16: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
- 40, // 17: control.SetShardModeRequest.signature:type_name -> control.Signature
- 29, // 18: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
- 40, // 19: control.SetShardModeResponse.signature:type_name -> control.Signature
- 30, // 20: control.DumpShardRequest.body:type_name -> control.DumpShardRequest.Body
- 40, // 21: control.DumpShardRequest.signature:type_name -> control.Signature
- 31, // 22: control.DumpShardResponse.body:type_name -> control.DumpShardResponse.Body
- 40, // 23: control.DumpShardResponse.signature:type_name -> control.Signature
- 32, // 24: control.RestoreShardRequest.body:type_name -> control.RestoreShardRequest.Body
- 40, // 25: control.RestoreShardRequest.signature:type_name -> control.Signature
- 33, // 26: control.RestoreShardResponse.body:type_name -> control.RestoreShardResponse.Body
- 40, // 27: control.RestoreShardResponse.signature:type_name -> control.Signature
- 34, // 28: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
- 40, // 29: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
- 35, // 30: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
- 40, // 31: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
- 36, // 32: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
- 40, // 33: control.EvacuateShardRequest.signature:type_name -> control.Signature
- 37, // 34: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
- 40, // 35: control.EvacuateShardResponse.signature:type_name -> control.Signature
- 38, // 36: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
- 40, // 37: control.FlushCacheRequest.signature:type_name -> control.Signature
- 39, // 38: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
- 40, // 39: control.FlushCacheResponse.signature:type_name -> control.Signature
- 41, // 40: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus
- 42, // 41: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus
- 41, // 42: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus
- 43, // 43: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo
- 44, // 44: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode
- 0, // 45: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest
- 2, // 46: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest
- 4, // 47: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest
- 6, // 48: control.ControlService.ListShards:input_type -> control.ListShardsRequest
- 8, // 49: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest
- 10, // 50: control.ControlService.DumpShard:input_type -> control.DumpShardRequest
- 12, // 51: control.ControlService.RestoreShard:input_type -> control.RestoreShardRequest
- 14, // 52: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest
- 16, // 53: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest
- 18, // 54: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest
- 1, // 55: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse
- 3, // 56: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse
- 5, // 57: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse
- 7, // 58: control.ControlService.ListShards:output_type -> control.ListShardsResponse
- 9, // 59: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse
- 11, // 60: control.ControlService.DumpShard:output_type -> control.DumpShardResponse
- 13, // 61: control.ControlService.RestoreShard:output_type -> control.RestoreShardResponse
- 15, // 62: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse
- 17, // 63: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse
- 19, // 64: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse
- 55, // [55:65] is the sub-list for method output_type
- 45, // [45:55] is the sub-list for method input_type
- 45, // [45:45] is the sub-list for extension type_name
- 45, // [45:45] is the sub-list for extension extendee
- 0, // [0:45] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_service_proto_init() }
-func file_pkg_services_control_service_proto_init() {
- if File_pkg_services_control_service_proto != nil {
- return
- }
- file_pkg_services_control_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_service_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 40,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_pkg_services_control_service_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_service_proto_depIdxs,
- MessageInfos: file_pkg_services_control_service_proto_msgTypes,
- }.Build()
- File_pkg_services_control_service_proto = out.File
- file_pkg_services_control_service_proto_rawDesc = nil
- file_pkg_services_control_service_proto_goTypes = nil
- file_pkg_services_control_service_proto_depIdxs = nil
-}
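
The block above ends the protoc-gen-go registration boilerplate of service.pb.go: the per-message exporters and the protoimpl.TypeBuilder call existed only so the protobuf runtime could reflect over the generated types. The replacement bindings in service_frostfs.pb.go (further below) drop that runtime entirely and define each message as a plain struct with hand-rolled easyproto codecs. A minimal sketch of that shape, using a hypothetical one-field message (PingRequest_Body and its Nonce field are invented for illustration; the real generated methods below follow the same pattern):

package control

import (
	"fmt"

	"github.com/VictoriaMetrics/easyproto"
)

var mp easyproto.MarshalerPool

// PingRequest_Body is a hypothetical one-field message.
type PingRequest_Body struct {
	Nonce uint64
}

// MarshalProtobuf appends the binary encoding of x to dst.
func (x *PingRequest_Body) MarshalProtobuf(dst []byte) []byte {
	m := mp.Get()
	defer mp.Put(m)
	mm := m.MessageMarshaler()
	if x != nil && x.Nonce != 0 {
		mm.AppendUint64(1, x.Nonce) // field 1, varint
	}
	return m.Marshal(dst)
}

// UnmarshalProtobuf decodes src field by field, skipping unknown fields.
func (x *PingRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in %s", "PingRequest_Body")
		}
		if fc.FieldNum == 1 {
			v, ok := fc.Uint64()
			if !ok {
				return fmt.Errorf("cannot unmarshal field %s", "Nonce")
			}
			x.Nonce = v
		}
	}
	return nil
}
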
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 5a09a74a53..4c539acfc2 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -6,342 +6,749 @@ import "pkg/services/control/types.proto";
option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control";
-// `ControlService` provides an interface for internal work with the storage node.
+// `ControlService` provides an interface for internal work with the storage
+// node.
service ControlService {
- // Performs health check of the storage node.
- rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse);
+ // Performs health check of the storage node.
+ rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse);
- // Sets status of the storage node in FrostFS network map.
- rpc SetNetmapStatus (SetNetmapStatusRequest) returns (SetNetmapStatusResponse);
+ // Sets status of the storage node in FrostFS network map.
+ rpc SetNetmapStatus(SetNetmapStatusRequest) returns (SetNetmapStatusResponse);
- // Mark objects to be removed from node's local object storage.
- rpc DropObjects (DropObjectsRequest) returns (DropObjectsResponse);
+ // Gets status of the storage node in FrostFS network map.
+ rpc GetNetmapStatus(GetNetmapStatusRequest) returns (GetNetmapStatusResponse);
- // Returns list that contains information about all shards of a node.
- rpc ListShards (ListShardsRequest) returns (ListShardsResponse);
+ // Mark objects to be removed from node's local object storage.
+ rpc DropObjects(DropObjectsRequest) returns (DropObjectsResponse);
- // Sets mode of the shard.
- rpc SetShardMode (SetShardModeRequest) returns (SetShardModeResponse);
+ // Returns list that contains information about all shards of a node.
+ rpc ListShards(ListShardsRequest) returns (ListShardsResponse);
- // Dump objects from the shard.
- rpc DumpShard (DumpShardRequest) returns (DumpShardResponse);
+ // Sets mode of the shard.
+ rpc SetShardMode(SetShardModeRequest) returns (SetShardModeResponse);
- // Restore objects from dump.
- rpc RestoreShard (RestoreShardRequest) returns (RestoreShardResponse);
+ // Synchronizes all log operations for the specified tree.
+ rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
- // Synchronizes all log operations for the specified tree.
- rpc SynchronizeTree (SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
+ // StartShardEvacuation starts moving all data from one shard to the others.
+ rpc StartShardEvacuation(StartShardEvacuationRequest)
+ returns (StartShardEvacuationResponse);
- // EvacuateShard moves all data from one shard to the others.
- rpc EvacuateShard (EvacuateShardRequest) returns (EvacuateShardResponse);
+ // GetShardEvacuationStatus returns evacuation status.
+ rpc GetShardEvacuationStatus(GetShardEvacuationStatusRequest)
+ returns (GetShardEvacuationStatusResponse);
- // FlushCache moves all data from one shard to the others.
- rpc FlushCache (FlushCacheRequest) returns (FlushCacheResponse);
+ // ResetShardEvacuationStatus resets evacuation status if there is no running
+ // evacuation process.
+ rpc ResetShardEvacuationStatus(ResetShardEvacuationStatusRequest)
+ returns (ResetShardEvacuationStatusResponse);
+
+ // StopShardEvacuation stops moving all data from one shard to the others.
+ rpc StopShardEvacuation(StopShardEvacuationRequest)
+ returns (StopShardEvacuationResponse);
+
+ // FlushCache flushes objects from the write-cache to the main storage.
+ rpc FlushCache(FlushCacheRequest) returns (FlushCacheResponse);
+
+ // Doctor performs storage restructuring operations on the engine.
+ rpc Doctor(DoctorRequest) returns (DoctorResponse);
+
+ // Add local access policy engine overrides to a node.
+ rpc AddChainLocalOverride(AddChainLocalOverrideRequest)
+ returns (AddChainLocalOverrideResponse);
+
+ // Get local access policy engine overrides stored in the node by chain id.
+ rpc GetChainLocalOverride(GetChainLocalOverrideRequest)
+ returns (GetChainLocalOverrideResponse);
+
+ // List local access policy engine overrides stored in the node by container
+ // id.
+ rpc ListChainLocalOverrides(ListChainLocalOverridesRequest)
+ returns (ListChainLocalOverridesResponse);
+
+ // Remove local access policy engine overrides stored in the node by chain
+ // id.
+ rpc RemoveChainLocalOverride(RemoveChainLocalOverrideRequest)
+ returns (RemoveChainLocalOverrideResponse);
+
+ // Remove local access policy engine overrides stored in the node by
+ // target.
+ rpc RemoveChainLocalOverridesByTarget(
+ RemoveChainLocalOverridesByTargetRequest)
+ returns (RemoveChainLocalOverridesByTargetResponse);
+
+ // List targets of the local APE overrides stored in the node.
+ rpc ListTargetsLocalOverrides(ListTargetsLocalOverridesRequest)
+ returns (ListTargetsLocalOverridesResponse);
+
+ // Flush objects from write-cache and move it to degraded read-only mode.
+ rpc SealWriteCache(SealWriteCacheRequest) returns (SealWriteCacheResponse);
+
+ // DetachShards detaches and closes shards.
+ rpc DetachShards(DetachShardsRequest) returns (DetachShardsResponse);
+
+ // StartShardRebuild starts shard rebuild process.
+ rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
+
+ // ListShardsForObject returns shard info where object is stored.
+ rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
}
// Health check request.
message HealthCheckRequest {
- // Health check request body.
- message Body {
- }
+ // Health check request body.
+ message Body {}
- // Body of health check request message.
- Body body = 1;
+ // Body of health check request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Health check response.
message HealthCheckResponse {
- // Health check response body
- message Body {
- // Status of the storage node in FrostFS network map.
- NetmapStatus netmap_status = 1;
+ // Health check response body
+ message Body {
+ // Status of the storage node in FrostFS network map.
+ NetmapStatus netmap_status = 1;
- // Health status of storage node application.
- HealthStatus health_status = 2;
- }
+ // Health status of storage node application.
+ HealthStatus health_status = 2;
+ }
- // Body of health check response message.
- Body body = 1;
+ // Body of health check response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Set netmap status request.
message SetNetmapStatusRequest {
- // Set netmap status request body.
- message Body {
- // New storage node status in FrostFS network map.
- // If status is MAINTENANCE, the node checks whether maintenance is
- // allowed in the network settings. In case of prohibition, the request
- // is denied. Otherwise, node switches to local maintenance state. To
- // force local maintenance, use `force_maintenance` flag.
- NetmapStatus status = 1;
+ // Set netmap status request body.
+ message Body {
+ // New storage node status in FrostFS network map.
+ // If status is MAINTENANCE, the node checks whether maintenance is
+ // allowed in the network settings. In case of prohibition, the request
+ // is denied. Otherwise, node switches to local maintenance state. To
+ // force local maintenance, use `force_maintenance` flag.
+ NetmapStatus status = 1;
- // MAINTENANCE status validation skip flag. If set, node starts local
- // maintenance regardless of network settings. The flag MUST NOT be
- // set for any other status.
- bool force_maintenance = 2;
- }
+ // MAINTENANCE status validation skip flag. If set, node starts local
+ // maintenance regardless of network settings. The flag MUST NOT be
+ // set for any other status.
+ bool force_maintenance = 2;
+ }
- // Body of set netmap status request message.
- Body body = 1;
+ // Body of set netmap status request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Set netmap status response.
message SetNetmapStatusResponse {
- // Set netmap status response body
- message Body {
- }
+ // Set netmap status response body
+ message Body {}
- // Body of set netmap status response message.
- Body body = 1;
+ // Body of set netmap status response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
+}
+
+// Get netmap status request.
+message GetNetmapStatusRequest {
+ message Body {}
+
+ // Body of get netmap status request message.
+ Body body = 1;
+
+ // Body signature.
+ Signature signature = 2;
+}
+
+// Get netmap status response.
+message GetNetmapStatusResponse {
+ message Body {
+ // Storage node status in FrostFS network map.
+ NetmapStatus status = 1;
+
+ // Network map epoch.
+ uint64 epoch = 2;
+ }
+
+ // Body of get netmap status response message.
+ Body body = 1;
+
+ // Body signature.
+ Signature signature = 2;
}
// Request to drop the objects.
message DropObjectsRequest {
- // Request body structure.
- message Body {
- // List of object addresses to be removed.
- // in FrostFS API binary format.
- repeated bytes address_list = 1;
- }
+ // Request body structure.
+ message Body {
+ // List of object addresses to be removed,
+ // in FrostFS API binary format.
+ repeated bytes address_list = 1;
+ }
- // Body of the request message.
- Body body = 1;
+ // Body of the request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Response to request to drop the objects.
message DropObjectsResponse {
- // Response body structure.
- message Body {
- }
+ // Response body structure.
+ message Body {}
- // Body of the response message.
- Body body = 1;
+ // Body of the response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Request to list all shards of the node.
message ListShardsRequest {
- // Request body structure.
- message Body {
- }
+ // Request body structure.
+ message Body {}
- // Body of the request message.
- Body body = 1;
+ // Body of the request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// ListShards response.
message ListShardsResponse {
- // Response body structure.
- message Body {
- // List of the node's shards.
- repeated ShardInfo shards = 1;
- }
+ // Response body structure.
+ message Body {
+ // List of the node's shards.
+ repeated ShardInfo shards = 1;
+ }
- // Body of the response message.
- Body body = 1;
+ // Body of the response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Request to set mode of the shard.
message SetShardModeRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
+ // Request body structure.
+ message Body {
+ // ID of the shard.
+ repeated bytes shard_ID = 1;
- // Mode that requested to be set.
- ShardMode mode = 2;
+ // Mode that is requested to be set.
+ ShardMode mode = 2;
- // Flag signifying whether error counter should be set to 0.
- bool resetErrorCounter = 3;
- }
+ // Flag signifying whether error counter should be set to 0.
+ bool resetErrorCounter = 3;
+ }
- // Body of set shard mode request message.
- Body body = 1;
+ // Body of set shard mode request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// SetShardMode response.
message SetShardModeResponse {
- // Response body structure.
- message Body {
- }
+ // Response body structure.
+ message Body {}
- // Body of set shard mode response message.
- Body body = 1;
+ // Body of set shard mode response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
-}
-
-// DumpShard request.
-message DumpShardRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- bytes shard_ID = 1;
-
- // Path to the output.
- string filepath = 2;
-
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 3;
- }
-
- // Body of dump shard request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// DumpShard response.
-message DumpShardResponse {
- // Response body structure.
- message Body {
- }
-
- // Body of dump shard response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-
-// RestoreShard request.
-message RestoreShardRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- bytes shard_ID = 1;
-
- // Path to the output.
- string filepath = 2;
-
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 3;
- }
-
- // Body of restore shard request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// RestoreShard response.
-message RestoreShardResponse {
- // Response body structure.
- message Body {
- }
-
- // Body of restore shard response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// SynchronizeTree request.
message SynchronizeTreeRequest {
- // Request body structure.
- message Body {
- bytes container_id = 1;
- string tree_id = 2;
- // Starting height for the synchronization. Can be omitted.
- uint64 height = 3;
- }
+ // Request body structure.
+ message Body {
+ bytes container_id = 1;
+ string tree_id = 2;
+ // Starting height for the synchronization. Can be omitted.
+ uint64 height = 3;
+ }
- // Body of restore shard request message.
- Body body = 1;
+ // Body of synchronize tree request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// SynchronizeTree response.
message SynchronizeTreeResponse {
- // Response body structure.
- message Body {
- }
+ // Response body structure.
+ message Body {}
- // Body of restore shard response message.
- Body body = 1;
+ // Body of synchronize tree response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
-
// EvacuateShard request.
message EvacuateShardRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
+ // Request body structure.
+ message Body {
+ // ID of the shard.
+ repeated bytes shard_ID = 1;
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 2;
- }
+ // Flag indicating whether object read errors should be ignored.
+ bool ignore_errors = 2;
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// EvacuateShard response.
message EvacuateShardResponse {
- // Response body structure.
- message Body {
- uint32 count = 1;
- }
+ // Response body structure.
+ message Body { uint32 count = 1; }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// FlushCache request.
message FlushCacheRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
- }
+ // Request body structure.
+ message Body {
+ // ID of the shard.
+ repeated bytes shard_ID = 1;
+ // If true, then writecache will be left in read-only mode after the
+ // flush is completed.
+ bool seal = 2;
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// FlushCache response.
message FlushCacheResponse {
- // Response body structure.
- message Body {
+ // Response body structure.
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// Doctor request.
+message DoctorRequest {
+ // Request body structure.
+ message Body {
+ // Number of threads to use for the operation.
+ uint32 concurrency = 1;
+ // Flag to search the engine for duplicate objects and leave only one copy.
+ bool remove_duplicates = 2;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// Doctor response.
+message DoctorResponse {
+ // Response body structure.
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// StartShardEvacuation request.
+message StartShardEvacuationRequest {
+ // Request body structure.
+ message Body {
+ enum Scope {
+ NONE = 0;
+ OBJECTS = 1;
+ TREES = 2;
}
- Body body = 1;
- Signature signature = 2;
+ // IDs of the shards.
+ repeated bytes shard_ID = 1;
+ // Flag indicating whether object read errors should be ignored.
+ bool ignore_errors = 2;
+ // Evacuation scope.
+ uint32 scope = 3;
+ // Count of concurrent container evacuation workers.
+ uint32 container_worker_count = 4;
+ // Count of concurrent object evacuation workers.
+ uint32 object_worker_count = 5;
+ // Evacuate only objects stored in `REP 1` containers.
+ bool rep_one_only = 6;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// StartShardEvacuation response.
+message StartShardEvacuationResponse {
+ // Response body structure.
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// GetShardEvacuationStatus request.
+message GetShardEvacuationStatusRequest {
+ // Request body structure.
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// GetShardEvacuationStatus response.
+message GetShardEvacuationStatusResponse {
+ // Response body structure.
+ message Body {
+ // Evacuation status enum.
+ enum Status {
+ EVACUATE_SHARD_STATUS_UNDEFINED = 0;
+ RUNNING = 1;
+ COMPLETED = 2;
+ }
+
+ // Unix timestamp value.
+ message UnixTimestamp { int64 value = 1; }
+
+ // Duration in seconds.
+ message Duration { int64 seconds = 1; }
+
+ // Total count of objects to evacuate. The value is approximate, so
+ // evacuated + failed + skipped == total is not guaranteed after completion.
+ uint64 total_objects = 1;
+ // Evacuated objects count.
+ uint64 evacuated_objects = 2;
+ // Failed objects count.
+ uint64 failed_objects = 3;
+
+ // Shard IDs.
+ repeated bytes shard_ID = 4;
+ // Evacuation process status.
+ Status status = 5;
+ // Evacuation process duration.
+ Duration duration = 6;
+ // Evacuation process started at timestamp.
+ UnixTimestamp started_at = 7;
+ // Error message if evacuation failed.
+ string error_message = 8;
+
+ // Skipped objects count.
+ uint64 skipped_objects = 9;
+
+ // Total count of trees to evacuate.
+ uint64 total_trees = 10;
+ // Evacuated trees count.
+ uint64 evacuated_trees = 11;
+ // Failed trees count.
+ uint64 failed_trees = 12;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// ResetShardEvacuationStatus request.
+message ResetShardEvacuationStatusRequest {
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// ResetShardEvacuationStatus response.
+message ResetShardEvacuationStatusResponse {
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// StopShardEvacuation request.
+message StopShardEvacuationRequest {
+ // Request body structure.
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// StopShardEvacuation response.
+message StopShardEvacuationResponse {
+ // Response body structure.
+ message Body {}
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// AddChainLocalOverride request.
+message AddChainLocalOverrideRequest {
+ message Body {
+ // Target for which the overrides are applied.
+ ChainTarget target = 1;
+
+ // Serialized rule chain. If chain ID is left empty
+ // in the chain, then it will be generated and returned
+ // in the response.
+ bytes chain = 2;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+// AddChainLocalOverride response.
+message AddChainLocalOverrideResponse {
+ message Body {
+ // Chain ID assigned for the added rule chain.
+ // If chain ID is left empty in the request, then
+ // it will be generated.
+ bytes chain_id = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+// GetChainLocalOverride request.
+message GetChainLocalOverrideRequest {
+ message Body {
+ // Target for which the overrides are applied.
+ ChainTarget target = 1;
+
+ // Chain ID assigned for the added rule chain.
+ bytes chain_id = 2;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+// GetChainLocalOverride response.
+message GetChainLocalOverrideResponse {
+ message Body {
+ // Serialized rule chain.
+ bytes chain = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+// ListChainLocalOverrides request.
+message ListChainLocalOverridesRequest {
+ message Body {
+ // Target for which the overrides are applied.
+ ChainTarget target = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+// ListChainLocalOverrides response.
+message ListChainLocalOverridesResponse {
+ message Body {
+ // The list of serialized rule chains.
+ repeated bytes chains = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+// ListTargetsLocalOverrides request.
+message ListTargetsLocalOverridesRequest {
+ message Body {
+ // Name of the chain for which the override targets are listed.
+ string chainName = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+// ListTargetsLocalOverrides response.
+message ListTargetsLocalOverridesResponse {
+ message Body {
+ // The list of chain targets.
+ repeated ChainTarget targets = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message RemoveChainLocalOverrideRequest {
+ message Body {
+ // Target for which the overrides are applied.
+ ChainTarget target = 1;
+
+ // Chain ID assigned for the added rule chain.
+ bytes chain_id = 2;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message RemoveChainLocalOverrideResponse {
+ message Body {}
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message RemoveChainLocalOverridesByTargetRequest {
+ message Body {
+ // Target for which the overrides are applied.
+ ChainTarget target = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message RemoveChainLocalOverridesByTargetResponse {
+ message Body {}
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message SealWriteCacheRequest {
+ // Request body structure.
+ message Body {
+ // ID of the shard.
+ repeated bytes shard_ID = 1;
+
+ // Flag indicating whether object read errors should be ignored.
+ bool ignore_errors = 2;
+
+ // Flag indicating whether writecache will be sealed asynchronously.
+ bool async = 3;
+
+ // If true, then writecache will be sealed, but the mode will be restored to the current one.
+ bool restore_mode = 4;
+
+ // If true, then writecache will shrink internal storage.
+ bool shrink = 5;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message SealWriteCacheResponse {
+ message Body {
+ message Status {
+ bytes shard_ID = 1;
+ bool success = 2;
+ string error = 3;
+ }
+ repeated Status results = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message DetachShardsRequest {
+ message Body { repeated bytes shard_ID = 1; }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message DetachShardsResponse {
+ message Body {}
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message StartShardRebuildRequest {
+ message Body {
+ repeated bytes shard_ID = 1;
+ uint32 target_fill_percent = 2;
+ uint32 concurrency_limit = 3;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message StartShardRebuildResponse {
+ message Body {
+ message Status {
+ bytes shard_ID = 1;
+ bool success = 2;
+ string error = 3;
+ }
+ repeated Status results = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
+
+message ListShardsForObjectRequest {
+ message Body {
+ string object_id = 1;
+ string container_id = 2;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message ListShardsForObjectResponse {
+ message Body {
+ // List of the node's shards storing the object.
+ repeated bytes shard_ID = 1;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
}
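
The regenerated Go bindings below replace the protoc output with plain structs, setters, and symmetric binary/JSON codecs. A minimal usage sketch under one assumption, namely that the import path matches the go_package option above; the enum value is passed numerically to avoid guessing constant names:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.SetNetmapStatusRequest_Body)
	body.SetStatus(control.NetmapStatus(1)) // numeric value; constant name omitted on purpose
	body.SetForceMaintenance(false)

	req := new(control.SetNetmapStatusRequest)
	req.SetBody(body)

	// Binary round-trip through the hand-rolled easyproto codecs.
	raw := req.MarshalProtobuf(nil)
	var decoded control.SetNetmapStatusRequest
	if err := decoded.UnmarshalProtobuf(raw); err != nil {
		panic(err)
	}

	// JSON encoding prints enum names when the value is known
	// (see the NetmapStatus_name lookup in MarshalEasyJSON below).
	js, err := decoded.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js))
}
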
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 5bb119090d..44849d591a 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -2,57 +2,137 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type HealthCheckRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Marshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckRequest struct {
+ Body *HealthCheckRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil)
+ _ json.Marshaler = (*HealthCheckRequest)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -68,73 +148,401 @@ func (x *HealthCheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
+ x.Body = v
+}
+func (x *HealthCheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckRequest_Body
+ f = new(HealthCheckRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse_Body struct {
+ NetmapStatus NetmapStatus `json:"netmapStatus"`
+ HealthStatus HealthStatus `json:"healthStatus"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Marshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.EnumSize(1, int32(x.NetmapStatus))
size += proto.EnumSize(2, int32(x.HealthStatus))
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.NetmapStatus))
- offset += proto.EnumMarshal(2, buf[offset:], int32(x.HealthStatus))
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.NetmapStatus) != 0 {
+ mm.AppendInt32(1, int32(x.NetmapStatus))
+ }
+ if int32(x.HealthStatus) != 0 {
+ mm.AppendInt32(2, int32(x.HealthStatus))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // NetmapStatus
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NetmapStatus")
+ }
+ x.NetmapStatus = NetmapStatus(data)
+ case 2: // HealthStatus
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "HealthStatus")
+ }
+ x.HealthStatus = HealthStatus(data)
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus {
+ if x != nil {
+ return x.NetmapStatus
+ }
+ return 0
+}
+func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) {
+ x.NetmapStatus = v
+}
+func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return 0
+}
+func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
+ x.HealthStatus = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"netmapStatus\":"
+ out.RawString(prefix)
+ v := int32(x.NetmapStatus)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"healthStatus\":"
+ out.RawString(prefix)
+ v := int32(x.HealthStatus)
+ if vv, ok := HealthStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "netmapStatus":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.NetmapStatus = f
+ }
+ case "healthStatus":
+ {
+ var f HealthStatus
+ var parsedValue HealthStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := HealthStatus_value[v]; ok {
+ parsedValue = HealthStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = HealthStatus(vv)
+ case float64:
+ parsedValue = HealthStatus(v)
+ }
+ f = parsedValue
+ x.HealthStatus = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse struct {
+ Body *HealthCheckResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil)
+ _ json.Marshaler = (*HealthCheckResponse)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthCheckResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -150,73 +558,380 @@ func (x *HealthCheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
+ x.Body = v
+}
+func (x *HealthCheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckResponse_Body
+ f = new(HealthCheckResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetNetmapStatusRequest_Body struct {
+ Status NetmapStatus `json:"status"`
+ ForceMaintenance bool `json:"forceMaintenance"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest_Body)(nil)
+ _ json.Marshaler = (*SetNetmapStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetNetmapStatusRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.EnumSize(1, int32(x.Status))
size += proto.BoolSize(2, x.ForceMaintenance)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.Status))
- offset += proto.BoolMarshal(2, buf[offset:], x.ForceMaintenance)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Status) != 0 {
+ mm.AppendInt32(1, int32(x.Status))
+ }
+ if x.ForceMaintenance {
+ mm.AppendBool(2, x.ForceMaintenance)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Status
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Status")
+ }
+ x.Status = NetmapStatus(data)
+ case 2: // ForceMaintenance
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ForceMaintenance")
+ }
+ x.ForceMaintenance = data
+ }
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
+func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) {
+ x.Status = v
+}
+func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool {
+ if x != nil {
+ return x.ForceMaintenance
+ }
+ return false
+}
+func (x *SetNetmapStatusRequest_Body) SetForceMaintenance(v bool) {
+ x.ForceMaintenance = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"status\":"
+ out.RawString(prefix)
+ v := int32(x.Status)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"forceMaintenance\":"
+ out.RawString(prefix)
+ out.Bool(x.ForceMaintenance)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "status":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.Status = f
+ }
+ case "forceMaintenance":
+ {
+ var f bool
+ f = in.Bool()
+ x.ForceMaintenance = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetNetmapStatusRequest struct {
+ Body *SetNetmapStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest)(nil)
+ _ json.Marshaler = (*SetNetmapStatusRequest)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetNetmapStatusRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -232,62 +947,283 @@ func (x *SetNetmapStatusRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetNetmapStatusRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetNetmapStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) {
+ x.Body = v
+}
+func (x *SetNetmapStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetNetmapStatusRequest_Body
+ f = new(SetNetmapStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
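+
+// Editorial sketch, not generator output: ReadSignedData and SetSignature
+// are the hooks a caller uses to sign a request over its body bytes. The
+// sign callback below is hypothetical and stands in for the real signer:
+//
+//	func signSetNetmapStatus(req *SetNetmapStatusRequest, sign func([]byte) *Signature) error {
+//		data, err := req.ReadSignedData(nil)
+//		if err != nil {
+//			return err
+//		}
+//		req.SetSignature(sign(data))
+//		return nil
+//	}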
+
+type SetNetmapStatusResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse_Body)(nil)
+ _ json.Marshaler = (*SetNetmapStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetNetmapStatusResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetNetmapStatusResponse struct {
+ Body *SetNetmapStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse)(nil)
+ _ json.Marshaler = (*SetNetmapStatusResponse)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetNetmapStatusResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -303,69 +1239,1036 @@ func (x *SetNetmapStatusResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetNetmapStatusResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetNetmapStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) {
+ x.Body = v
+}
+func (x *SetNetmapStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetNetmapStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetNetmapStatusResponse_Body
+ f = new(SetNetmapStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNetmapStatusRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest_Body)(nil)
+ _ json.Marshaler = (*GetNetmapStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
-func (x *DropObjectsRequest_Body) StableSize() (size int) {
- size += proto.RepeatedBytesSize(1, x.AddressList)
+func (x *GetNetmapStatusRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.AddressList)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNetmapStatusRequest struct {
+ Body *GetNetmapStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest)(nil)
+ _ json.Marshaler = (*GetNetmapStatusRequest)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
-func (x *DropObjectsRequest) StableSize() (size int) {
+func (x *GetNetmapStatusRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *GetNetmapStatusRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
// Returns any error encountered which did not allow writing the data completely.
// Otherwise, returns the buffer in which the data is written.
//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsRequest) StableMarshal(buf []byte) []byte {
+// Structures with the same field values have the same signed data.
+func (x *GetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
- return []byte{}
+ return
}
- if buf == nil {
- buf = make([]byte, x.StableSize())
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNetmapStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNetmapStatusRequest) GetBody() *GetNetmapStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNetmapStatusRequest) SetBody(v *GetNetmapStatusRequest_Body) {
+ x.Body = v
+}
+func (x *GetNetmapStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNetmapStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNetmapStatusRequest_Body
+ f = new(GetNetmapStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNetmapStatusResponse_Body struct {
+ Status NetmapStatus `json:"status"`
+ Epoch uint64 `json:"epoch"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse_Body)(nil)
+ _ json.Marshaler = (*GetNetmapStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetNetmapStatusResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.EnumSize(1, int32(x.Status))
+ size += proto.UInt64Size(2, x.Epoch)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Status) != 0 {
+ mm.AppendInt32(1, int32(x.Status))
+ }
+ if x.Epoch != 0 {
+ mm.AppendUint64(2, x.Epoch)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Status
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Status")
+ }
+ x.Status = NetmapStatus(data)
+ case 2: // Epoch
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Epoch")
+ }
+ x.Epoch = data
+ }
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse_Body) GetStatus() NetmapStatus {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
+func (x *GetNetmapStatusResponse_Body) SetStatus(v NetmapStatus) {
+ x.Status = v
+}
+func (x *GetNetmapStatusResponse_Body) GetEpoch() uint64 {
+ if x != nil {
+ return x.Epoch
+ }
+ return 0
+}
+func (x *GetNetmapStatusResponse_Body) SetEpoch(v uint64) {
+ x.Epoch = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"status\":"
+ out.RawString(prefix)
+ v := int32(x.Status)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"epoch\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "status":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.Status = f
+ }
+ case "epoch":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.Epoch = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
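+
+// Editorial note, not generator output: "epoch" is written as a quoted
+// decimal string, following the proto3 JSON mapping for 64-bit integers,
+// while the decoder reads it back through in.JsonNumber(), which accepts
+// both the quoted and the bare form. Round-trip sketch:
+//
+//	b, _ := (&GetNetmapStatusResponse_Body{Epoch: 42}).MarshalJSON() // ...,"epoch":"42"}
+//	var body GetNetmapStatusResponse_Body
+//	_ = body.UnmarshalJSON(b) // body.Epoch == 42 again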
+
+type GetNetmapStatusResponse struct {
+ Body *GetNetmapStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse)(nil)
+ _ json.Marshaler = (*GetNetmapStatusResponse)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetNetmapStatusResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *GetNetmapStatusResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *GetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNetmapStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse) GetBody() *GetNetmapStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse) SetBody(v *GetNetmapStatusResponse_Body) {
+ x.Body = v
+}
+func (x *GetNetmapStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNetmapStatusResponse_Body
+ f = new(GetNetmapStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DropObjectsRequest_Body struct {
+ AddressList [][]byte `json:"addressList"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsRequest_Body)(nil)
+ _ json.Marshaler = (*DropObjectsRequest_Body)(nil)
+ _ json.Unmarshaler = (*DropObjectsRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DropObjectsRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.AddressList)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DropObjectsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.AddressList {
+ mm.AppendBytes(1, x.AddressList[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // AddressList
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "AddressList")
+ }
+ x.AddressList = append(x.AddressList, data)
+ }
+ }
+ return nil
+}
+func (x *DropObjectsRequest_Body) GetAddressList() [][]byte {
+ if x != nil {
+ return x.AddressList
+ }
+ return nil
+}
+func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) {
+ x.AddressList = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"addressList\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.AddressList {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.AddressList[i] != nil {
+ out.Base64Bytes(x.AddressList[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "addressList":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.AddressList = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
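+
+// Editorial note, not generator output: each AddressList entry is an opaque
+// binary address, emitted as a base64 string in JSON and as a repeated
+// bytes field (tag 1) in protobuf. Sketch, where rawAddr is assumed to be
+// an already-marshaled object address:
+//
+//	body := new(DropObjectsRequest_Body)
+//	body.SetAddressList([][]byte{rawAddr})
+//	data := body.MarshalProtobuf(nil) // stable binary form of the body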
+
+type DropObjectsRequest struct {
+ Body *DropObjectsRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsRequest)(nil)
+ _ json.Marshaler = (*DropObjectsRequest)(nil)
+ _ json.Unmarshaler = (*DropObjectsRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DropObjectsRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
}
// ReadSignedData fills buf with signed data of x.
@@ -383,62 +2286,283 @@ func (x *DropObjectsRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DropObjectsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DropObjectsRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DropObjectsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DropObjectsRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) {
+ x.Body = v
+}
+func (x *DropObjectsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DropObjectsRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DropObjectsRequest_Body
+ f = new(DropObjectsRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DropObjectsResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsResponse_Body)(nil)
+ _ json.Marshaler = (*DropObjectsResponse_Body)(nil)
+ _ json.Unmarshaler = (*DropObjectsResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *DropObjectsResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DropObjectsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DropObjectsResponse struct {
+ Body *DropObjectsResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsResponse)(nil)
+ _ json.Marshaler = (*DropObjectsResponse)(nil)
+ _ json.Unmarshaler = (*DropObjectsResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *DropObjectsResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -454,62 +2578,283 @@ func (x *DropObjectsResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DropObjectsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DropObjectsResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DropObjectsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DropObjectsResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) {
+ x.Body = v
+}
+func (x *DropObjectsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DropObjectsResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DropObjectsResponse_Body
+ f = new(DropObjectsResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsRequest_Body)(nil)
+ _ json.Marshaler = (*ListShardsRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ListShardsRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsRequest struct {
+ Body *ListShardsRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsRequest)(nil)
+ _ json.Marshaler = (*ListShardsRequest)(nil)
+ _ json.Unmarshaler = (*ListShardsRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ListShardsRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -525,75 +2870,344 @@ func (x *ListShardsRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListShardsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListShardsRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) {
+ x.Body = v
+}
+func (x *ListShardsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsRequest_Body
+ f = new(ListShardsRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsResponse_Body struct {
+ Shards []ShardInfo `json:"shards"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsResponse_Body)(nil)
+ _ json.Marshaler = (*ListShardsResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ListShardsResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
for i := range x.Shards {
- size += proto.NestedStructureSize(1, x.Shards[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Shards[i])
}
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- for i := range x.Shards {
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Shards[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Shards {
+ x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shards
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shards")
+ }
+ x.Shards = append(x.Shards, ShardInfo{})
+ ff := &x.Shards[len(x.Shards)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsResponse_Body) GetShards() []ShardInfo {
+ if x != nil {
+ return x.Shards
+ }
+ return nil
+}
+func (x *ListShardsResponse_Body) SetShards(v []ShardInfo) {
+ x.Shards = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shards\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shards {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Shards[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shards":
+ {
+ var f ShardInfo
+ var list []ShardInfo
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = ShardInfo{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shards = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsResponse struct {
+ Body *ListShardsResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsResponse)(nil)
+ _ json.Marshaler = (*ListShardsResponse)(nil)
+ _ json.Unmarshaler = (*ListShardsResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ListShardsResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -609,75 +3223,440 @@ func (x *ListShardsResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data.
func (x *ListShardsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListShardsResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) {
+ x.Body = v
+}
+func (x *ListShardsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsResponse_Body
+ f = new(ListShardsResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
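+
+// Illustrative sketch (not generated code): a typical round trip through the
+// pooled marshaler added above. MarshalProtobuf appends to dst and returns
+// the extended slice, so passing nil allocates a fresh buffer:
+//
+//	resp := new(ListShardsResponse)
+//	resp.SetBody(new(ListShardsResponse_Body))
+//	data := resp.MarshalProtobuf(nil)
+//
+//	var decoded ListShardsResponse
+//	if err := decoded.UnmarshalProtobuf(data); err != nil {
+//		// handle malformed input
+//	}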
+
+type SetShardModeRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ Mode ShardMode `json:"mode"`
+ ResetErrorCounter bool `json:"resetErrorCounter"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeRequest_Body)(nil)
+ _ json.Marshaler = (*SetShardModeRequest_Body)(nil)
+ _ json.Unmarshaler = (*SetShardModeRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetShardModeRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.EnumSize(2, int32(x.Mode))
size += proto.BoolSize(3, x.ResetErrorCounter)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.EnumMarshal(2, buf[offset:], int32(x.Mode))
- offset += proto.BoolMarshal(3, buf[offset:], x.ResetErrorCounter)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetShardModeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if int32(x.Mode) != 0 {
+ mm.AppendInt32(2, int32(x.Mode))
+ }
+ if x.ResetErrorCounter {
+ mm.AppendBool(3, x.ResetErrorCounter)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // Mode
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Mode")
+ }
+ x.Mode = ShardMode(data)
+ case 3: // ResetErrorCounter
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ResetErrorCounter")
+ }
+ x.ResetErrorCounter = data
+ }
+ }
+ return nil
+}
+func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *SetShardModeRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *SetShardModeRequest_Body) GetMode() ShardMode {
+ if x != nil {
+ return x.Mode
+ }
+ return 0
+}
+func (x *SetShardModeRequest_Body) SetMode(v ShardMode) {
+ x.Mode = v
+}
+func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool {
+ if x != nil {
+ return x.ResetErrorCounter
+ }
+ return false
+}
+func (x *SetShardModeRequest_Body) SetResetErrorCounter(v bool) {
+ x.ResetErrorCounter = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"mode\":"
+ out.RawString(prefix)
+ v := int32(x.Mode)
+ if vv, ok := ShardMode_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"resetErrorCounter\":"
+ out.RawString(prefix)
+ out.Bool(x.ResetErrorCounter)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "mode":
+ {
+ var f ShardMode
+ var parsedValue ShardMode
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := ShardMode_value[v]; ok {
+ parsedValue = ShardMode(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = ShardMode(vv)
+ case float64:
+ parsedValue = ShardMode(v)
+ }
+ f = parsedValue
+ x.Mode = f
+ }
+ case "resetErrorCounter":
+ {
+ var f bool
+ f = in.Bool()
+ x.ResetErrorCounter = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
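+
+// Illustrative sketch (not generated code): in the JSON emitted above, every
+// shard ID is base64-encoded and the mode is written as its enum name when
+// one is registered in ShardMode_name, falling back to the raw number:
+//
+//	body := new(SetShardModeRequest_Body)
+//	body.SetShard_ID([][]byte{{0x01, 0x02}})
+//	body.SetResetErrorCounter(true)
+//	js, err := body.MarshalJSON() // the shard ID appears as "AQI="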
+
+type SetShardModeRequest struct {
+ Body *SetShardModeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeRequest)(nil)
+ _ json.Marshaler = (*SetShardModeRequest)(nil)
+ _ json.Unmarshaler = (*SetShardModeRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetShardModeRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -693,62 +3672,283 @@ func (x *SetShardModeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data.
func (x *SetShardModeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetShardModeRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetShardModeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetShardModeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) {
+ x.Body = v
+}
+func (x *SetShardModeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetShardModeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetShardModeRequest_Body
+ f = new(SetShardModeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
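+
+// Illustrative sketch (not generated code): EmitProtobuf above skips nil
+// members, so a request can be marshaled before it is signed:
+//
+//	req := new(SetShardModeRequest)
+//	req.SetBody(new(SetShardModeRequest_Body))
+//	data := req.MarshalProtobuf(nil) // field 2 is absent while Signature is nil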
+
+type SetShardModeResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeResponse_Body)(nil)
+ _ json.Marshaler = (*SetShardModeResponse_Body)(nil)
+ _ json.Unmarshaler = (*SetShardModeResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetShardModeResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetShardModeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
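+
+// Illustrative note (not generated code): SetShardModeResponse_Body has no
+// fields, so EmitProtobuf above appends nothing, MarshalProtobuf(nil) yields
+// an empty slice, and MarshalJSON produces "{}".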
+
+type SetShardModeResponse struct {
+ Body *SetShardModeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeResponse)(nil)
+ _ json.Marshaler = (*SetShardModeResponse)(nil)
+ _ json.Unmarshaler = (*SetShardModeResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SetShardModeResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -764,385 +3964,415 @@ func (x *SetShardModeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data.
func (x *SetShardModeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetShardModeResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardRequest_Body) StableSize() (size int) {
- size += proto.BytesSize(1, x.Shard_ID)
- size += proto.StringSize(2, x.Filepath)
- size += proto.BoolSize(3, x.IgnoreErrors)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardRequest_Body) StableMarshal(buf []byte) []byte {
+func (x *SetShardModeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
- return []byte{}
+ return
}
- if buf == nil {
- buf = make([]byte, x.StableSize())
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.StringMarshal(2, buf[offset:], x.Filepath)
- offset += proto.BoolMarshal(3, buf[offset:], x.IgnoreErrors)
- return buf
}
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardRequest) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetShardModeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) {
+ x.Body = v
+}
+func (x *SetShardModeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetShardModeResponse) SetSignature(v *Signature) {
+ x.Signature = v
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardRequest) StableMarshal(buf []byte) []byte {
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) {
if x == nil {
- return []byte{}
+ out.RawString("null")
+ return
}
- if buf == nil {
- buf = make([]byte, x.StableSize())
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
}
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DumpShardRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DumpShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *DumpShardRequest) SetSignature(sig *Signature) {
- x.Signature = sig
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardResponse_Body) StableSize() (size int) {
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardResponse) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
}
- if buf == nil {
- buf = make([]byte, x.StableSize())
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
}
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DumpShardResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DumpShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *DumpShardResponse) SetSignature(sig *Signature) {
- x.Signature = sig
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardRequest_Body) StableSize() (size int) {
- size += proto.BytesSize(1, x.Shard_ID)
- size += proto.StringSize(2, x.Filepath)
- size += proto.BoolSize(3, x.IgnoreErrors)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetShardModeResponse_Body
+ f = new(SetShardModeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
}
- if buf == nil {
- buf = make([]byte, x.StableSize())
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
}
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.StringMarshal(2, buf[offset:], x.Filepath)
- offset += proto.BoolMarshal(3, buf[offset:], x.IgnoreErrors)
- return buf
}
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardRequest) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
+type SynchronizeTreeRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ Height uint64 `json:"height"`
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RestoreShardRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RestoreShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *RestoreShardRequest) SetSignature(sig *Signature) {
- x.Signature = sig
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardResponse_Body) StableSize() (size int) {
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardResponse) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RestoreShardResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RestoreShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *RestoreShardResponse) SetSignature(sig *Signature) {
- x.Signature = sig
-}
+
+var (
+	_ encoding.ProtoMarshaler   = (*SynchronizeTreeRequest_Body)(nil)
+	_ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest_Body)(nil)
+	_ json.Marshaler            = (*SynchronizeTreeRequest_Body)(nil)
+	_ json.Unmarshaler          = (*SynchronizeTreeRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SynchronizeTreeRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.Height)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.Height)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.Height != 0 {
+ mm.AppendUint64(3, x.Height)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // Height
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Height")
+ }
+ x.Height = data
+ }
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *SynchronizeTreeRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *SynchronizeTreeRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+func (x *SynchronizeTreeRequest_Body) SetHeight(v uint64) {
+ x.Height = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"height\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "height":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.Height = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
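+
+// Illustrative sketch (not generated code): Height is a uint64, so the
+// writer above quotes it in JSON (the protojson convention for 64-bit
+// integers) and the reader parses it back with strconv.ParseUint:
+//
+//	body := new(SynchronizeTreeRequest_Body)
+//	body.SetHeight(42)
+//	js, err := body.MarshalJSON() // height is rendered as "42", not 42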
+
+type SynchronizeTreeRequest struct {
+ Body *SynchronizeTreeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest)(nil)
+ _ json.Marshaler = (*SynchronizeTreeRequest)(nil)
+ _ json.Unmarshaler = (*SynchronizeTreeRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SynchronizeTreeRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1158,62 +4388,283 @@ func (x *SynchronizeTreeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data.
func (x *SynchronizeTreeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SynchronizeTreeRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SynchronizeTreeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) {
+ x.Body = v
+}
+func (x *SynchronizeTreeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SynchronizeTreeRequest_Body
+ f = new(SynchronizeTreeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
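+
+// Illustrative sketch (not generated code): ReadSignedData above marshals
+// only the body, so a signature over the request covers exactly the body's
+// protobuf encoding:
+//
+//	req := new(SynchronizeTreeRequest)
+//	req.SetBody(new(SynchronizeTreeRequest_Body))
+//	signed, err := req.ReadSignedData(nil) // same bytes as req.GetBody().MarshalProtobuf(nil)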
+
+type SynchronizeTreeResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse_Body)(nil)
+ _ json.Marshaler = (*SynchronizeTreeResponse_Body)(nil)
+ _ json.Unmarshaler = (*SynchronizeTreeResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SynchronizeTreeResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SynchronizeTreeResponse struct {
+ Body *SynchronizeTreeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse)(nil)
+ _ json.Marshaler = (*SynchronizeTreeResponse)(nil)
+ _ json.Unmarshaler = (*SynchronizeTreeResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *SynchronizeTreeResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1229,73 +4680,383 @@ func (x *SynchronizeTreeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data.
func (x *SynchronizeTreeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SynchronizeTreeResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SynchronizeTreeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) {
+ x.Body = v
+}
+func (x *SynchronizeTreeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SynchronizeTreeResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SynchronizeTreeResponse_Body
+ f = new(SynchronizeTreeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type EvacuateShardRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest_Body)(nil)
+ _ json.Marshaler = (*EvacuateShardRequest_Body)(nil)
+ _ json.Unmarshaler = (*EvacuateShardRequest_Body)(nil)
+)
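+
+// The blank assignments above are compile-time assertions that the type
+// implements the protobuf and JSON (un)marshaler interfaces; they carry no
+// runtime cost.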
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *EvacuateShardRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.IgnoreErrors {
+ mm.AppendBool(2, x.IgnoreErrors)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // IgnoreErrors
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
+ }
+ x.IgnoreErrors = data
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *EvacuateShardRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+func (x *EvacuateShardRequest_Body) SetIgnoreErrors(v bool) {
+ x.IgnoreErrors = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ignoreErrors\":"
+ out.RawString(prefix)
+ out.Bool(x.IgnoreErrors)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "ignoreErrors":
+ {
+ var f bool
+ f = in.Bool()
+ x.IgnoreErrors = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
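+
+// Illustrative sketch (not generated code): building a body and producing its
+// stable binary encoding.
+//
+//	body := new(EvacuateShardRequest_Body)
+//	body.SetShard_ID([][]byte{id}) // id is a raw shard ID, assumed in scope
+//	body.SetIgnoreErrors(true)
+//	raw := body.MarshalProtobuf(nil)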
+
+type EvacuateShardRequest struct {
+ Body *EvacuateShardRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest)(nil)
+ _ json.Marshaler = (*EvacuateShardRequest)(nil)
+ _ json.Unmarshaler = (*EvacuateShardRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *EvacuateShardRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1311,71 +5072,331 @@ func (x *EvacuateShardRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *EvacuateShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *EvacuateShardRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(EvacuateShardRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *EvacuateShardRequest) SetBody(v *EvacuateShardRequest_Body) {
+ x.Body = v
+}
+func (x *EvacuateShardRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *EvacuateShardRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *EvacuateShardRequest_Body
+ f = new(EvacuateShardRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
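+
+// Illustrative sketch (not generated code): a protobuf round-trip.
+//
+//	data := req.MarshalProtobuf(nil)
+//	var decoded EvacuateShardRequest
+//	if err := decoded.UnmarshalProtobuf(data); err != nil {
+//		// handle malformed input
+//	}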
+
+type EvacuateShardResponse_Body struct {
+ Count uint32 `json:"count"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse_Body)(nil)
+ _ json.Marshaler = (*EvacuateShardResponse_Body)(nil)
+ _ json.Unmarshaler = (*EvacuateShardResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *EvacuateShardResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.UInt32Size(1, x.Count)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt32Marshal(1, buf[offset:], x.Count)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Count != 0 {
+ mm.AppendUint32(1, x.Count)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Count
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Count")
+ }
+ x.Count = data
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardResponse_Body) GetCount() uint32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+func (x *EvacuateShardResponse_Body) SetCount(v uint32) {
+ x.Count = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"count\":"
+ out.RawString(prefix)
+ out.Uint32(x.Count)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "count":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Count = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
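+
+// Illustrative sketch (not generated code): the JSON form of the body.
+//
+//	b := &EvacuateShardResponse_Body{Count: 7}
+//	data, _ := b.MarshalJSON() // {"count":7}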
+
+type EvacuateShardResponse struct {
+ Body *EvacuateShardResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse)(nil)
+ _ json.Marshaler = (*EvacuateShardResponse)(nil)
+ _ json.Unmarshaler = (*EvacuateShardResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *EvacuateShardResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1391,71 +5412,383 @@ func (x *EvacuateShardResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *EvacuateShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *EvacuateShardResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(EvacuateShardResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *EvacuateShardResponse) SetBody(v *EvacuateShardResponse_Body) {
+ x.Body = v
+}
+func (x *EvacuateShardResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *EvacuateShardResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *EvacuateShardResponse_Body
+ f = new(EvacuateShardResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type FlushCacheRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ Seal bool `json:"seal"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheRequest_Body)(nil)
+ _ json.Marshaler = (*FlushCacheRequest_Body)(nil)
+ _ json.Unmarshaler = (*FlushCacheRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *FlushCacheRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ size += proto.BoolSize(2, x.Seal)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *FlushCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.Seal {
+ mm.AppendBool(2, x.Seal)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // Seal
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Seal")
+ }
+ x.Seal = data
+ }
+ }
+ return nil
+}
+func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *FlushCacheRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *FlushCacheRequest_Body) GetSeal() bool {
+ if x != nil {
+ return x.Seal
+ }
+ return false
+}
+func (x *FlushCacheRequest_Body) SetSeal(v bool) {
+ x.Seal = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"seal\":"
+ out.RawString(prefix)
+ out.Bool(x.Seal)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "seal":
+ {
+ var f bool
+ f = in.Bool()
+ x.Seal = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
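+
+// Note: in the JSON form, each shard ID is a base64 string; empty strings
+// decode to nil entries in Shard_ID.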
+
+type FlushCacheRequest struct {
+ Body *FlushCacheRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheRequest)(nil)
+ _ json.Marshaler = (*FlushCacheRequest)(nil)
+ _ json.Unmarshaler = (*FlushCacheRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *FlushCacheRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1471,62 +5804,283 @@ func (x *FlushCacheRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *FlushCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *FlushCacheRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *FlushCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(FlushCacheRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *FlushCacheRequest) SetBody(v *FlushCacheRequest_Body) {
+ x.Body = v
+}
+func (x *FlushCacheRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *FlushCacheRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *FlushCacheRequest_Body
+ f = new(FlushCacheRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
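+// FlushCacheResponse_Body is intentionally empty: it encodes to zero bytes in
+// protobuf and to "{}" in JSON.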
+type FlushCacheResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheResponse_Body)(nil)
+ _ json.Marshaler = (*FlushCacheResponse_Body)(nil)
+ _ json.Unmarshaler = (*FlushCacheResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *FlushCacheResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *FlushCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type FlushCacheResponse struct {
+ Body *FlushCacheResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheResponse)(nil)
+ _ json.Marshaler = (*FlushCacheResponse)(nil)
+ _ json.Unmarshaler = (*FlushCacheResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *FlushCacheResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1542,9 +6096,11934 @@ func (x *FlushCacheResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *FlushCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *FlushCacheResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *FlushCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(FlushCacheResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *FlushCacheResponse) SetBody(v *FlushCacheResponse_Body) {
+ x.Body = v
+}
+func (x *FlushCacheResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *FlushCacheResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *FlushCacheResponse_Body
+ f = new(FlushCacheResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorRequest_Body struct {
+ Concurrency uint32 `json:"concurrency"`
+ RemoveDuplicates bool `json:"removeDuplicates"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorRequest_Body)(nil)
+ _ json.Marshaler = (*DoctorRequest_Body)(nil)
+ _ json.Unmarshaler = (*DoctorRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.UInt32Size(1, x.Concurrency)
+ size += proto.BoolSize(2, x.RemoveDuplicates)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DoctorRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Concurrency != 0 {
+ mm.AppendUint32(1, x.Concurrency)
+ }
+ if x.RemoveDuplicates {
+ mm.AppendBool(2, x.RemoveDuplicates)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Concurrency
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Concurrency")
+ }
+ x.Concurrency = data
+ case 2: // RemoveDuplicates
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RemoveDuplicates")
+ }
+ x.RemoveDuplicates = data
+ }
+ }
+ return nil
+}
+func (x *DoctorRequest_Body) GetConcurrency() uint32 {
+ if x != nil {
+ return x.Concurrency
+ }
+ return 0
+}
+func (x *DoctorRequest_Body) SetConcurrency(v uint32) {
+ x.Concurrency = v
+}
+func (x *DoctorRequest_Body) GetRemoveDuplicates() bool {
+ if x != nil {
+ return x.RemoveDuplicates
+ }
+ return false
+}
+func (x *DoctorRequest_Body) SetRemoveDuplicates(v bool) {
+ x.RemoveDuplicates = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"concurrency\":"
+ out.RawString(prefix)
+ out.Uint32(x.Concurrency)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"removeDuplicates\":"
+ out.RawString(prefix)
+ out.Bool(x.RemoveDuplicates)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "concurrency":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Concurrency = f
+ }
+ case "removeDuplicates":
+ {
+ var f bool
+ f = in.Bool()
+ x.RemoveDuplicates = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorRequest struct {
+ Body *DoctorRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorRequest)(nil)
+ _ json.Marshaler = (*DoctorRequest)(nil)
+ _ json.Unmarshaler = (*DoctorRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *DoctorRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *DoctorRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
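+
+// Illustrative sketch (signPayload is a hypothetical helper, not part of this
+// package): preparing a signed request.
+//
+//	req := &DoctorRequest{Body: &DoctorRequest_Body{Concurrency: 4}}
+//	data, _ := req.ReadSignedData(nil)
+//	req.SetSignature(signPayload(key, data))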
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DoctorRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DoctorRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DoctorRequest) GetBody() *DoctorRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DoctorRequest) SetBody(v *DoctorRequest_Body) {
+ x.Body = v
+}
+func (x *DoctorRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DoctorRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DoctorRequest_Body
+ f = new(DoctorRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorResponse_Body)(nil)
+ _ json.Marshaler = (*DoctorResponse_Body)(nil)
+ _ json.Unmarshaler = (*DoctorResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DoctorResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorResponse struct {
+ Body *DoctorResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorResponse)(nil)
+ _ json.Marshaler = (*DoctorResponse)(nil)
+ _ json.Unmarshaler = (*DoctorResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *DoctorResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *DoctorResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DoctorResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DoctorResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DoctorResponse) GetBody() *DoctorResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DoctorResponse) SetBody(v *DoctorResponse_Body) {
+ x.Body = v
+}
+func (x *DoctorResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DoctorResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DoctorResponse_Body
+ f = new(DoctorResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardEvacuationRequest_Body_Scope int32
+
+const (
+ StartShardEvacuationRequest_Body_NONE StartShardEvacuationRequest_Body_Scope = 0
+ StartShardEvacuationRequest_Body_OBJECTS StartShardEvacuationRequest_Body_Scope = 1
+ StartShardEvacuationRequest_Body_TREES StartShardEvacuationRequest_Body_Scope = 2
+)
+
+var (
+ StartShardEvacuationRequest_Body_Scope_name = map[int32]string{
+ 0: "NONE",
+ 1: "OBJECTS",
+ 2: "TREES",
+ }
+ StartShardEvacuationRequest_Body_Scope_value = map[string]int32{
+ "NONE": 0,
+ "OBJECTS": 1,
+ "TREES": 2,
+ }
+)
+
+func (x StartShardEvacuationRequest_Body_Scope) String() string {
+ if v, ok := StartShardEvacuationRequest_Body_Scope_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool {
+ if v, ok := StartShardEvacuationRequest_Body_Scope_value[s]; ok {
+ *x = StartShardEvacuationRequest_Body_Scope(v)
+ return true
+ }
+ return false
+}
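+
+// Illustrative sketch (not generated code): round-tripping a scope through its
+// textual name.
+//
+//	var s StartShardEvacuationRequest_Body_Scope
+//	if s.FromString("OBJECTS") {
+//		_ = s.String() // "OBJECTS"
+//	}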
+
+type StartShardEvacuationRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Scope uint32 `json:"scope"`
+ ContainerWorkerCount uint32 `json:"containerWorkerCount"`
+ ObjectWorkerCount uint32 `json:"objectWorkerCount"`
+ RepOneOnly bool `json:"repOneOnly"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest_Body)(nil)
+ _ json.Marshaler = (*StartShardEvacuationRequest_Body)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ size += proto.BoolSize(2, x.IgnoreErrors)
+ size += proto.UInt32Size(3, x.Scope)
+ size += proto.UInt32Size(4, x.ContainerWorkerCount)
+ size += proto.UInt32Size(5, x.ObjectWorkerCount)
+ size += proto.BoolSize(6, x.RepOneOnly)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.IgnoreErrors {
+ mm.AppendBool(2, x.IgnoreErrors)
+ }
+ if x.Scope != 0 {
+ mm.AppendUint32(3, x.Scope)
+ }
+ if x.ContainerWorkerCount != 0 {
+ mm.AppendUint32(4, x.ContainerWorkerCount)
+ }
+ if x.ObjectWorkerCount != 0 {
+ mm.AppendUint32(5, x.ObjectWorkerCount)
+ }
+ if x.RepOneOnly {
+ mm.AppendBool(6, x.RepOneOnly)
+ }
+}
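+
+// Note: only non-default values are emitted above; zero integers, false
+// booleans and empty repeated fields are omitted, as in canonical proto3.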
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // IgnoreErrors
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
+ }
+ x.IgnoreErrors = data
+ case 3: // Scope
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Scope")
+ }
+ x.Scope = data
+ case 4: // ContainerWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount")
+ }
+ x.ContainerWorkerCount = data
+ case 5: // ObjectWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
+ }
+ x.ObjectWorkerCount = data
+ case 6: // RepOneOnly
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly")
+ }
+ x.RepOneOnly = data
+ }
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+func (x *StartShardEvacuationRequest_Body) SetIgnoreErrors(v bool) {
+ x.IgnoreErrors = v
+}
+func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
+ if x != nil {
+ return x.Scope
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) {
+ x.Scope = v
+}
+func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 {
+ if x != nil {
+ return x.ContainerWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) {
+ x.ContainerWorkerCount = v
+}
+func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
+ if x != nil {
+ return x.ObjectWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
+ x.ObjectWorkerCount = v
+}
+func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool {
+ if x != nil {
+ return x.RepOneOnly
+ }
+ return false
+}
+func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) {
+ x.RepOneOnly = v
+}
+
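+// Illustrative sketch (hypothetical caller code, not emitted by the
+// generator): populating a body through its setters and producing the stable
+// protobuf encoding; shardID stands in for a real shard identifier:
+//
+//	body := new(StartShardEvacuationRequest_Body)
+//	body.SetShard_ID([][]byte{shardID})
+//	body.SetScope(uint32(StartShardEvacuationRequest_Body_OBJECTS))
+//	body.SetIgnoreErrors(true)
+//	raw := body.MarshalProtobuf(nil)
+//	_ = raw
+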
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ignoreErrors\":"
+ out.RawString(prefix)
+ out.Bool(x.IgnoreErrors)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"scope\":"
+ out.RawString(prefix)
+ out.Uint32(x.Scope)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ContainerWorkerCount)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"objectWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ObjectWorkerCount)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"repOneOnly\":"
+ out.RawString(prefix)
+ out.Bool(x.RepOneOnly)
+ }
+ out.RawByte('}')
+}
+
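+// Illustrative note: each shard ID is emitted as a base64 string via
+// out.Base64Bytes, matching the proto3 JSON mapping for bytes fields, so a
+// two-shard body serializes along the lines of (made-up shard IDs):
+//
+//	{"shardID":["q80=","zu8="],"ignoreErrors":true,"scope":1,...}
+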
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "ignoreErrors":
+ {
+ var f bool
+ f = in.Bool()
+ x.IgnoreErrors = f
+ }
+ case "scope":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Scope = f
+ }
+ case "containerWorkerCount":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.ContainerWorkerCount = f
+ }
+ case "objectWorkerCount":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.ObjectWorkerCount = f
+ }
+ case "repOneOnly":
+ {
+ var f bool
+ f = in.Bool()
+ x.RepOneOnly = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
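+// Illustrative note: numeric fields above are read through in.JsonNumber(),
+// which accepts both bare and string-formatted decimals, so {"scope":1} and
+// {"scope":"1"} decode identically (hypothetical caller code):
+//
+//	var b StartShardEvacuationRequest_Body
+//	_ = b.UnmarshalJSON([]byte(`{"scope":"1"}`)) // b.Scope == 1 afterwards
+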
+type StartShardEvacuationRequest struct {
+ Body *StartShardEvacuationRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest)(nil)
+ _ json.Marshaler = (*StartShardEvacuationRequest)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardEvacuationRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardEvacuationRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
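+// Illustrative sketch (hypothetical caller code): the two methods above are
+// the hook for request signing; a signer sizes a buffer once, reads the
+// canonical body bytes, and signs them. sign is an assumed helper that
+// produces a *Signature:
+//
+//	buf := make([]byte, 0, req.SignedDataSize())
+//	data, _ := req.ReadSignedData(buf)
+//	req.SetSignature(sign(data))
+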
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardEvacuationRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest) SetBody(v *StartShardEvacuationRequest_Body) {
+ x.Body = v
+}
+func (x *StartShardEvacuationRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardEvacuationRequest_Body
+ f = new(StartShardEvacuationRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardEvacuationResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse_Body)(nil)
+ _ json.Marshaler = (*StartShardEvacuationResponse_Body)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardEvacuationResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
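+// Illustrative note: this body carries no fields, so it encodes to zero
+// protobuf bytes and to the JSON object {}; it exists only so the response
+// can keep the common body-plus-signature envelope (hypothetical code):
+//
+//	var b StartShardEvacuationResponse_Body
+//	_ = b.MarshalProtobuf(nil) // empty encoding
+//	j, _ := b.MarshalJSON()    // j == []byte("{}")
+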
+type StartShardEvacuationResponse struct {
+ Body *StartShardEvacuationResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse)(nil)
+ _ json.Marshaler = (*StartShardEvacuationResponse)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardEvacuationResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardEvacuationResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardEvacuationResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardEvacuationResponse) SetBody(v *StartShardEvacuationResponse_Body) {
+ x.Body = v
+}
+func (x *StartShardEvacuationResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardEvacuationResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardEvacuationResponse_Body
+ f = new(StartShardEvacuationResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetShardEvacuationStatusRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusRequest struct {
+ Body *GetShardEvacuationStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusRequest)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetShardEvacuationStatusRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *GetShardEvacuationStatusRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *GetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetShardEvacuationStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusRequest) SetBody(v *GetShardEvacuationStatusRequest_Body) {
+ x.Body = v
+}
+func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetShardEvacuationStatusRequest_Body
+ f = new(GetShardEvacuationStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusResponse_Body_Status int32
+
+const (
+ GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0
+ GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1
+ GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2
+)
+
+var (
+ GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{
+ 0: "EVACUATE_SHARD_STATUS_UNDEFINED",
+ 1: "RUNNING",
+ 2: "COMPLETED",
+ }
+ GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{
+ "EVACUATE_SHARD_STATUS_UNDEFINED": 0,
+ "RUNNING": 1,
+ "COMPLETED": 2,
+ }
+)
+
+func (x GetShardEvacuationStatusResponse_Body_Status) String() string {
+ if v, ok := GetShardEvacuationStatusResponse_Body_Status_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *GetShardEvacuationStatusResponse_Body_Status) FromString(s string) bool {
+ if v, ok := GetShardEvacuationStatusResponse_Body_Status_value[s]; ok {
+ *x = GetShardEvacuationStatusResponse_Body_Status(v)
+ return true
+ }
+ return false
+}
+
+type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct {
+ Value int64 `json:"value"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.Int64Size(1, x.Value)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Value != 0 {
+ mm.AppendInt64(1, x.Value)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_UnixTimestamp")
+ }
+ switch fc.FieldNum {
+ case 1: // Value
+ data, ok := fc.Int64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Value")
+ }
+ x.Value = data
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) SetValue(v int64) {
+ x.Value = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"value\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Value, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
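+// Illustrative note: the value is written as a quoted decimal, e.g.
+// {"value":"1700000000"}, rather than a bare JSON number; this mirrors the
+// canonical proto3 JSON mapping for 64-bit integers and avoids precision loss
+// in JavaScript consumers. Round trip (hypothetical caller code):
+//
+//	ts := &GetShardEvacuationStatusResponse_Body_UnixTimestamp{Value: 1700000000}
+//	j, _ := ts.MarshalJSON() // {"value":"1700000000"}
+//	_ = ts.UnmarshalJSON(j)  // restores Value
+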
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "value":
+ {
+ var f int64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := int64(v)
+ f = pv
+ x.Value = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusResponse_Body_Duration struct {
+ Seconds int64 `json:"seconds"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.Int64Size(1, x.Seconds)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_Duration) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Seconds != 0 {
+ mm.AppendInt64(1, x.Seconds)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_Duration")
+ }
+ switch fc.FieldNum {
+ case 1: // Seconds
+ data, ok := fc.Int64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Seconds")
+ }
+ x.Seconds = data
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) SetSeconds(v int64) {
+ x.Seconds = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"seconds\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Seconds, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "seconds":
+ {
+ var f int64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := int64(v)
+ f = pv
+ x.Seconds = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusResponse_Body struct {
+ TotalObjects uint64 `json:"totalObjects"`
+ EvacuatedObjects uint64 `json:"evacuatedObjects"`
+ FailedObjects uint64 `json:"failedObjects"`
+ Shard_ID [][]byte `json:"shardID"`
+ Status GetShardEvacuationStatusResponse_Body_Status `json:"status"`
+ Duration *GetShardEvacuationStatusResponse_Body_Duration `json:"duration"`
+ StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `json:"startedAt"`
+ ErrorMessage string `json:"errorMessage"`
+ SkippedObjects uint64 `json:"skippedObjects"`
+ TotalTrees uint64 `json:"totalTrees"`
+ EvacuatedTrees uint64 `json:"evacuatedTrees"`
+ FailedTrees uint64 `json:"failedTrees"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetShardEvacuationStatusResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.UInt64Size(1, x.TotalObjects)
+ size += proto.UInt64Size(2, x.EvacuatedObjects)
+ size += proto.UInt64Size(3, x.FailedObjects)
+ size += proto.RepeatedBytesSize(4, x.Shard_ID)
+ size += proto.EnumSize(5, int32(x.Status))
+ size += proto.NestedStructureSize(6, x.Duration)
+ size += proto.NestedStructureSize(7, x.StartedAt)
+ size += proto.StringSize(8, x.ErrorMessage)
+ size += proto.UInt64Size(9, x.SkippedObjects)
+ size += proto.UInt64Size(10, x.TotalTrees)
+ size += proto.UInt64Size(11, x.EvacuatedTrees)
+ size += proto.UInt64Size(12, x.FailedTrees)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.TotalObjects != 0 {
+ mm.AppendUint64(1, x.TotalObjects)
+ }
+ if x.EvacuatedObjects != 0 {
+ mm.AppendUint64(2, x.EvacuatedObjects)
+ }
+ if x.FailedObjects != 0 {
+ mm.AppendUint64(3, x.FailedObjects)
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(4, x.Shard_ID[j])
+ }
+ if int32(x.Status) != 0 {
+ mm.AppendInt32(5, int32(x.Status))
+ }
+ if x.Duration != nil {
+ x.Duration.EmitProtobuf(mm.AppendMessage(6))
+ }
+ if x.StartedAt != nil {
+ x.StartedAt.EmitProtobuf(mm.AppendMessage(7))
+ }
+ if len(x.ErrorMessage) != 0 {
+ mm.AppendString(8, x.ErrorMessage)
+ }
+ if x.SkippedObjects != 0 {
+ mm.AppendUint64(9, x.SkippedObjects)
+ }
+ if x.TotalTrees != 0 {
+ mm.AppendUint64(10, x.TotalTrees)
+ }
+ if x.EvacuatedTrees != 0 {
+ mm.AppendUint64(11, x.EvacuatedTrees)
+ }
+ if x.FailedTrees != 0 {
+ mm.AppendUint64(12, x.FailedTrees)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // TotalObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TotalObjects")
+ }
+ x.TotalObjects = data
+ case 2: // EvacuatedObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "EvacuatedObjects")
+ }
+ x.EvacuatedObjects = data
+ case 3: // FailedObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "FailedObjects")
+ }
+ x.FailedObjects = data
+ case 4: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 5: // Status
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Status")
+ }
+ x.Status = GetShardEvacuationStatusResponse_Body_Status(data)
+ case 6: // Duration
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Duration")
+ }
+ x.Duration = new(GetShardEvacuationStatusResponse_Body_Duration)
+ if err := x.Duration.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 7: // StartedAt
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "StartedAt")
+ }
+ x.StartedAt = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp)
+ if err := x.StartedAt.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 8: // ErrorMessage
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ErrorMessage")
+ }
+ x.ErrorMessage = data
+ case 9: // SkippedObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "SkippedObjects")
+ }
+ x.SkippedObjects = data
+ case 10: // TotalTrees
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TotalTrees")
+ }
+ x.TotalTrees = data
+ case 11: // EvacuatedTrees
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "EvacuatedTrees")
+ }
+ x.EvacuatedTrees = data
+ case 12: // FailedTrees
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "FailedTrees")
+ }
+ x.FailedTrees = data
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetTotalObjects() uint64 {
+ if x != nil {
+ return x.TotalObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetTotalObjects(v uint64) {
+ x.TotalObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedObjects() uint64 {
+ if x != nil {
+ return x.EvacuatedObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedObjects(v uint64) {
+ x.EvacuatedObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetFailedObjects() uint64 {
+ if x != nil {
+ return x.FailedObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetFailedObjects(v uint64) {
+ x.FailedObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetStatus(v GetShardEvacuationStatusResponse_Body_Status) {
+ x.Status = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetDuration(v *GetShardEvacuationStatusResponse_Body_Duration) {
+ x.Duration = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp {
+ if x != nil {
+ return x.StartedAt
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetStartedAt(v *GetShardEvacuationStatusResponse_Body_UnixTimestamp) {
+ x.StartedAt = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetErrorMessage(v string) {
+ x.ErrorMessage = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetSkippedObjects() uint64 {
+ if x != nil {
+ return x.SkippedObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetSkippedObjects(v uint64) {
+ x.SkippedObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetTotalTrees() uint64 {
+ if x != nil {
+ return x.TotalTrees
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetTotalTrees(v uint64) {
+ x.TotalTrees = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedTrees() uint64 {
+ if x != nil {
+ return x.EvacuatedTrees
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedTrees(v uint64) {
+ x.EvacuatedTrees = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetFailedTrees() uint64 {
+ if x != nil {
+ return x.FailedTrees
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetFailedTrees(v uint64) {
+ x.FailedTrees = v
+}
+
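+// Illustrative sketch (hypothetical caller code): the getters are nil-safe,
+// so evacuation progress can be derived directly from a possibly-nil body:
+//
+//	done := body.GetEvacuatedObjects() + body.GetFailedObjects() + body.GetSkippedObjects()
+//	if total := body.GetTotalObjects(); total > 0 {
+//		fmt.Printf("objects: %d/%d\n", done, total)
+//	}
+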
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"totalObjects\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalObjects, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"evacuatedObjects\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedObjects, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"failedObjects\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedObjects, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"status\":"
+ out.RawString(prefix)
+ v := int32(x.Status)
+ if vv, ok := GetShardEvacuationStatusResponse_Body_Status_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"duration\":"
+ out.RawString(prefix)
+ x.Duration.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"startedAt\":"
+ out.RawString(prefix)
+ x.StartedAt.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"errorMessage\":"
+ out.RawString(prefix)
+ out.String(x.ErrorMessage)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"skippedObjects\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.SkippedObjects, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"totalTrees\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalTrees, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"evacuatedTrees\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedTrees, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"failedTrees\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedTrees, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "totalObjects":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.TotalObjects = f
+ }
+ case "evacuatedObjects":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.EvacuatedObjects = f
+ }
+ case "failedObjects":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.FailedObjects = f
+ }
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "status":
+ {
+ var f GetShardEvacuationStatusResponse_Body_Status
+ var parsedValue GetShardEvacuationStatusResponse_Body_Status
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := GetShardEvacuationStatusResponse_Body_Status_value[v]; ok {
+ parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv)
+ case float64:
+ parsedValue = GetShardEvacuationStatusResponse_Body_Status(v)
+ }
+ f = parsedValue
+ x.Status = f
+ }
+ case "duration":
+ {
+ var f *GetShardEvacuationStatusResponse_Body_Duration
+ f = new(GetShardEvacuationStatusResponse_Body_Duration)
+ f.UnmarshalEasyJSON(in)
+ x.Duration = f
+ }
+ case "startedAt":
+ {
+ var f *GetShardEvacuationStatusResponse_Body_UnixTimestamp
+ f = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp)
+ f.UnmarshalEasyJSON(in)
+ x.StartedAt = f
+ }
+ case "errorMessage":
+ {
+ var f string
+ f = in.String()
+ x.ErrorMessage = f
+ }
+ case "skippedObjects":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.SkippedObjects = f
+ }
+ case "totalTrees":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.TotalTrees = f
+ }
+ case "evacuatedTrees":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.EvacuatedTrees = f
+ }
+ case "failedTrees":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.FailedTrees = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
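+// Illustrative note: on the wire the "status" field may appear either as the
+// enum name or as its numeric value; MarshalEasyJSON emits the name when it
+// is known and the raw number otherwise, and the switch on in.Interface()
+// above accepts both spellings (hypothetical caller code):
+//
+//	var b GetShardEvacuationStatusResponse_Body
+//	_ = b.UnmarshalJSON([]byte(`{"status":"COMPLETED"}`))
+//	_ = b.UnmarshalJSON([]byte(`{"status":2}`)) // equivalent
+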
+type GetShardEvacuationStatusResponse struct {
+ Body *GetShardEvacuationStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetShardEvacuationStatusResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *GetShardEvacuationStatusResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *GetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetShardEvacuationStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse) SetBody(v *GetShardEvacuationStatusResponse_Body) {
+ x.Body = v
+}
+func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetShardEvacuationStatusResponse_Body
+ f = new(GetShardEvacuationStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ResetShardEvacuationStatusRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ResetShardEvacuationStatusRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ResetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ResetShardEvacuationStatusRequest struct {
+ Body *ResetShardEvacuationStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusRequest)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ResetShardEvacuationStatusRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ResetShardEvacuationStatusRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ResetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ResetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ResetShardEvacuationStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusRequest) GetBody() *ResetShardEvacuationStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusRequest) SetBody(v *ResetShardEvacuationStatusRequest_Body) {
+ x.Body = v
+}
+func (x *ResetShardEvacuationStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ResetShardEvacuationStatusRequest_Body
+ f = new(ResetShardEvacuationStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
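+// Example (sketch; the signing primitive lives outside this package, so only
+// a placeholder is shown): obtaining the byte sequence covered by the request
+// signature via SignedDataSize/ReadSignedData, then attaching a Signature.
+// SetKey/SetSign are assumed setters on the Signature type declared elsewhere
+// in this package.
+//
+//	req := new(ResetShardEvacuationStatusRequest)
+//	req.SetBody(new(ResetShardEvacuationStatusRequest_Body))
+//
+//	data, err := req.ReadSignedData(make([]byte, 0, req.SignedDataSize()))
+//	if err != nil {
+//		// handle the error
+//	}
+//	sig := new(Signature)
+//	// sig.SetKey(pub) and sig.SetSign(sign(data)), produced by an external
+//	// signer over data (placeholder, signer not shown).
+//	req.SetSignature(sig)
+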
+type ResetShardEvacuationStatusResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ResetShardEvacuationStatusResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ResetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ResetShardEvacuationStatusResponse struct {
+ Body *ResetShardEvacuationStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusResponse)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ResetShardEvacuationStatusResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ResetShardEvacuationStatusResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ResetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ResetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ResetShardEvacuationStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusResponse) GetBody() *ResetShardEvacuationStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusResponse) SetBody(v *ResetShardEvacuationStatusResponse_Body) {
+ x.Body = v
+}
+func (x *ResetShardEvacuationStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ResetShardEvacuationStatusResponse_Body
+ f = new(ResetShardEvacuationStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StopShardEvacuationRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest_Body)(nil)
+ _ json.Marshaler = (*StopShardEvacuationRequest_Body)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StopShardEvacuationRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StopShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
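+// Example (sketch): bodies with no fields, such as StopShardEvacuationRequest_Body,
+// marshal to zero protobuf bytes and to the empty JSON object, so the signed
+// data of the enclosing request is empty as well.
+//
+//	b := new(StopShardEvacuationRequest_Body)
+//	raw := b.MarshalProtobuf(nil) // len(raw) == 0
+//	js, _ := b.MarshalJSON()      // string(js) == "{}"
+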
+type StopShardEvacuationRequest struct {
+ Body *StopShardEvacuationRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest)(nil)
+ _ json.Marshaler = (*StopShardEvacuationRequest)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StopShardEvacuationRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StopShardEvacuationRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StopShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StopShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StopShardEvacuationRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StopShardEvacuationRequest) SetBody(v *StopShardEvacuationRequest_Body) {
+ x.Body = v
+}
+func (x *StopShardEvacuationRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StopShardEvacuationRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StopShardEvacuationRequest_Body
+ f = new(StopShardEvacuationRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StopShardEvacuationResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse_Body)(nil)
+ _ json.Marshaler = (*StopShardEvacuationResponse_Body)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StopShardEvacuationResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StopShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StopShardEvacuationResponse struct {
+ Body *StopShardEvacuationResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse)(nil)
+ _ json.Marshaler = (*StopShardEvacuationResponse)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StopShardEvacuationResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StopShardEvacuationResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StopShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StopShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StopShardEvacuationResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StopShardEvacuationResponse) SetBody(v *StopShardEvacuationResponse_Body) {
+ x.Body = v
+}
+func (x *StopShardEvacuationResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StopShardEvacuationResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StopShardEvacuationResponse_Body
+ f = new(StopShardEvacuationResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddChainLocalOverrideRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+ Chain []byte `json:"chain"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *AddChainLocalOverrideRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Target)
+ size += proto.BytesSize(2, x.Chain)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *AddChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if len(x.Chain) != 0 {
+ mm.AppendBytes(2, x.Chain)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Chain
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Chain")
+ }
+ x.Chain = data
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+func (x *AddChainLocalOverrideRequest_Body) GetChain() []byte {
+ if x != nil {
+ return x.Chain
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest_Body) SetChain(v []byte) {
+ x.Chain = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
+ x.Target.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chain\":"
+ out.RawString(prefix)
+ if x.Chain != nil {
+ out.Base64Bytes(x.Chain)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ case "chain":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Chain = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
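+// Example (sketch): filling the request body. The chain payload is opaque at
+// this level; serializedChain below is an assumed []byte produced by an
+// external policy-chain encoder, not something defined in this file.
+//
+//	body := new(AddChainLocalOverrideRequest_Body)
+//	body.SetTarget(new(ChainTarget)) // target fields are set elsewhere
+//	body.SetChain(serializedChain)
+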
+type AddChainLocalOverrideRequest struct {
+ Body *AddChainLocalOverrideRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideRequest)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *AddChainLocalOverrideRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *AddChainLocalOverrideRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *AddChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *AddChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddChainLocalOverrideRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest) GetBody() *AddChainLocalOverrideRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest) SetBody(v *AddChainLocalOverrideRequest_Body) {
+ x.Body = v
+}
+func (x *AddChainLocalOverrideRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddChainLocalOverrideRequest_Body
+ f = new(AddChainLocalOverrideRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddChainLocalOverrideResponse_Body struct {
+ ChainId []byte `json:"chainId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *AddChainLocalOverrideResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.ChainId)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *AddChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ChainId) != 0 {
+ mm.AppendBytes(1, x.ChainId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ChainId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainId")
+ }
+ x.ChainId = data
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse_Body) GetChainId() []byte {
+ if x != nil {
+ return x.ChainId
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse_Body) SetChainId(v []byte) {
+ x.ChainId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainId\":"
+ out.RawString(prefix)
+ if x.ChainId != nil {
+ out.Base64Bytes(x.ChainId)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chainId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ChainId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
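+// Example (sketch): the response body carries the identifier assigned to the
+// stored chain. Getters are nil-receiver safe, so access does not need an
+// explicit nil check.
+//
+//	var resp *AddChainLocalOverrideResponse
+//	id := resp.GetBody().GetChainId() // nil receiver yields nil, no panic
+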
+type AddChainLocalOverrideResponse struct {
+ Body *AddChainLocalOverrideResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideResponse)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *AddChainLocalOverrideResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *AddChainLocalOverrideResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *AddChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *AddChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddChainLocalOverrideResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse) GetBody() *AddChainLocalOverrideResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse) SetBody(v *AddChainLocalOverrideResponse_Body) {
+ x.Body = v
+}
+func (x *AddChainLocalOverrideResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddChainLocalOverrideResponse_Body
+ f = new(AddChainLocalOverrideResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetChainLocalOverrideRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+ ChainId []byte `json:"chainId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetChainLocalOverrideRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Target)
+ size += proto.BytesSize(2, x.ChainId)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if len(x.ChainId) != 0 {
+ mm.AppendBytes(2, x.ChainId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // ChainId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainId")
+ }
+ x.ChainId = data
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+func (x *GetChainLocalOverrideRequest_Body) GetChainId() []byte {
+ if x != nil {
+ return x.ChainId
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest_Body) SetChainId(v []byte) {
+ x.ChainId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
+ x.Target.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainId\":"
+ out.RawString(prefix)
+ if x.ChainId != nil {
+ out.Base64Bytes(x.ChainId)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ case "chainId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ChainId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetChainLocalOverrideRequest struct {
+ Body *GetChainLocalOverrideRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideRequest)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetChainLocalOverrideRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *GetChainLocalOverrideRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *GetChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetChainLocalOverrideRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest) GetBody() *GetChainLocalOverrideRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest) SetBody(v *GetChainLocalOverrideRequest_Body) {
+ x.Body = v
+}
+func (x *GetChainLocalOverrideRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetChainLocalOverrideRequest_Body
+ f = new(GetChainLocalOverrideRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetChainLocalOverrideResponse_Body struct {
+ Chain []byte `json:"chain"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetChainLocalOverrideResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.Chain)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Chain) != 0 {
+ mm.AppendBytes(1, x.Chain)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Chain
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Chain")
+ }
+ x.Chain = data
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse_Body) GetChain() []byte {
+ if x != nil {
+ return x.Chain
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse_Body) SetChain(v []byte) {
+ x.Chain = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chain\":"
+ out.RawString(prefix)
+ if x.Chain != nil {
+ out.Base64Bytes(x.Chain)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chain":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Chain = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
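+
+// Illustrative sketch (not generated output): Chain holds raw bytes, so
+// MarshalJSON emits it base64-encoded. The byte values below are assumed
+// placeholders, not a real serialized chain.
+//
+//	body := new(GetChainLocalOverrideResponse_Body)
+//	body.SetChain([]byte{1, 2, 3})
+//	js, err := body.MarshalJSON() // js == []byte(`{"chain":"AQID"}`)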
+
+type GetChainLocalOverrideResponse struct {
+ Body *GetChainLocalOverrideResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideResponse)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetChainLocalOverrideResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *GetChainLocalOverrideResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *GetChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetChainLocalOverrideResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse) GetBody() *GetChainLocalOverrideResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse) SetBody(v *GetChainLocalOverrideResponse_Body) {
+ x.Body = v
+}
+func (x *GetChainLocalOverrideResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetChainLocalOverrideResponse_Body
+ f = new(GetChainLocalOverrideResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListChainLocalOverridesRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListChainLocalOverridesRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Target)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListChainLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
+ x.Target.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListChainLocalOverridesRequest struct {
+ Body *ListChainLocalOverridesRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesRequest)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListChainLocalOverridesRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListChainLocalOverridesRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListChainLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListChainLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListChainLocalOverridesRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest) GetBody() *ListChainLocalOverridesRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest) SetBody(v *ListChainLocalOverridesRequest_Body) {
+ x.Body = v
+}
+func (x *ListChainLocalOverridesRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListChainLocalOverridesRequest_Body
+ f = new(ListChainLocalOverridesRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
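+
+// Illustrative sketch (not generated output): preparing the portion of the
+// request covered by Signature. The signing step itself is outside this
+// file and assumed to be provided by the caller.
+//
+//	req := new(ListChainLocalOverridesRequest)
+//	req.SetBody(new(ListChainLocalOverridesRequest_Body))
+//	buf := make([]byte, 0, req.SignedDataSize())
+//	signed, _ := req.ReadSignedData(buf) // bytes to pass to the signer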
+
+type ListChainLocalOverridesResponse_Body struct {
+ Chains [][]byte `json:"chains"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListChainLocalOverridesResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Chains)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListChainLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Chains {
+ mm.AppendBytes(1, x.Chains[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Chains
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Chains")
+ }
+ x.Chains = append(x.Chains, data)
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse_Body) GetChains() [][]byte {
+ if x != nil {
+ return x.Chains
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse_Body) SetChains(v [][]byte) {
+ x.Chains = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chains\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Chains {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Chains[i] != nil {
+ out.Base64Bytes(x.Chains[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chains":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Chains = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
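+
+// Illustrative sketch (not generated output): Chains is a repeated bytes
+// field, so each element is emitted as its own field-1 record on the wire
+// and as a base64 string in JSON. chainA and chainB are placeholder names
+// for serialized chains obtained elsewhere.
+//
+//	body := new(ListChainLocalOverridesResponse_Body)
+//	body.SetChains([][]byte{chainA, chainB})
+//	data := body.MarshalProtobuf(nil)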
+
+type ListChainLocalOverridesResponse struct {
+ Body *ListChainLocalOverridesResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesResponse)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListChainLocalOverridesResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListChainLocalOverridesResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListChainLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListChainLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListChainLocalOverridesResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse) GetBody() *ListChainLocalOverridesResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse) SetBody(v *ListChainLocalOverridesResponse_Body) {
+ x.Body = v
+}
+func (x *ListChainLocalOverridesResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListChainLocalOverridesResponse_Body
+ f = new(ListChainLocalOverridesResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListTargetsLocalOverridesRequest_Body struct {
+ ChainName string `json:"chainName"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListTargetsLocalOverridesRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.StringSize(1, x.ChainName)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListTargetsLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ChainName) != 0 {
+ mm.AppendString(1, x.ChainName)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ChainName
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainName")
+ }
+ x.ChainName = data
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest_Body) GetChainName() string {
+ if x != nil {
+ return x.ChainName
+ }
+ return ""
+}
+func (x *ListTargetsLocalOverridesRequest_Body) SetChainName(v string) {
+ x.ChainName = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainName\":"
+ out.RawString(prefix)
+ out.String(x.ChainName)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chainName":
+ {
+ var f string
+ f = in.String()
+ x.ChainName = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListTargetsLocalOverridesRequest struct {
+ Body *ListTargetsLocalOverridesRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesRequest)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListTargetsLocalOverridesRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListTargetsLocalOverridesRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListTargetsLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListTargetsLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListTargetsLocalOverridesRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest) GetBody() *ListTargetsLocalOverridesRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest) SetBody(v *ListTargetsLocalOverridesRequest_Body) {
+ x.Body = v
+}
+func (x *ListTargetsLocalOverridesRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListTargetsLocalOverridesRequest_Body
+ f = new(ListTargetsLocalOverridesRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListTargetsLocalOverridesResponse_Body struct {
+ Targets []ChainTarget `json:"targets"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListTargetsLocalOverridesResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ for i := range x.Targets {
+ size += proto.NestedStructureSizeUnchecked(1, &x.Targets[i])
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Targets {
+ x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Targets
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Targets")
+ }
+ x.Targets = append(x.Targets, ChainTarget{})
+ ff := &x.Targets[len(x.Targets)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []ChainTarget {
+ if x != nil {
+ return x.Targets
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []ChainTarget) {
+ x.Targets = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"targets\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Targets {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Targets[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "targets":
+ {
+ var f ChainTarget
+ var list []ChainTarget
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = ChainTarget{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Targets = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
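+
+// Illustrative sketch (not generated output): Targets is a repeated message
+// field stored by value; decoding appends elements in place, and encoding
+// emits one field-1 message per element. The empty targets are placeholders.
+//
+//	body := new(ListTargetsLocalOverridesResponse_Body)
+//	body.SetTargets([]ChainTarget{{}, {}})
+//	data := body.MarshalProtobuf(nil)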
+
+type ListTargetsLocalOverridesResponse struct {
+ Body *ListTargetsLocalOverridesResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesResponse)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListTargetsLocalOverridesResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListTargetsLocalOverridesResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListTargetsLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListTargetsLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListTargetsLocalOverridesResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse) GetBody() *ListTargetsLocalOverridesResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse) SetBody(v *ListTargetsLocalOverridesResponse_Body) {
+ x.Body = v
+}
+func (x *ListTargetsLocalOverridesResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListTargetsLocalOverridesResponse_Body
+ f = new(ListTargetsLocalOverridesResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverrideRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+ ChainId []byte `json:"chainId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverrideRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Target)
+ size += proto.BytesSize(2, x.ChainId)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if len(x.ChainId) != 0 {
+ mm.AppendBytes(2, x.ChainId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // ChainId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainId")
+ }
+ x.ChainId = data
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+func (x *RemoveChainLocalOverrideRequest_Body) GetChainId() []byte {
+ if x != nil {
+ return x.ChainId
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest_Body) SetChainId(v []byte) {
+ x.ChainId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
+ x.Target.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"chainId\":"
+ out.RawString(prefix)
+ if x.ChainId != nil {
+ out.Base64Bytes(x.ChainId)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ case "chainId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ChainId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverrideRequest struct {
+ Body *RemoveChainLocalOverrideRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideRequest)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverrideRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveChainLocalOverrideRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverrideRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest) GetBody() *RemoveChainLocalOverrideRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest) SetBody(v *RemoveChainLocalOverrideRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverrideRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverrideRequest_Body
+ f = new(RemoveChainLocalOverrideRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverrideResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverrideResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
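+
+// Illustrative note (not generated output): the body carries no fields, so
+// it marshals to zero bytes in protobuf and to "{}" in JSON.
+//
+//	body := new(RemoveChainLocalOverrideResponse_Body)
+//	data := body.MarshalProtobuf(nil) // len(data) == 0
+//	js, _ := body.MarshalJSON()       // js == []byte("{}")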
+
+type RemoveChainLocalOverrideResponse struct {
+ Body *RemoveChainLocalOverrideResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideResponse)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverrideResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response's signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveChainLocalOverrideResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverrideResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideResponse) GetBody() *RemoveChainLocalOverrideResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideResponse) SetBody(v *RemoveChainLocalOverrideResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverrideResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverrideResponse_Body
+ f = new(RemoveChainLocalOverrideResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Target)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"target\":"
+ out.RawString(prefix)
+ x.Target.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetRequest struct {
+ Body *RemoveChainLocalOverridesByTargetRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverridesByTargetRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size in bytes of the signed data of x.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveChainLocalOverridesByTargetRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveChainLocalOverridesByTargetRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverridesByTargetRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverridesByTargetRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) GetBody() *RemoveChainLocalOverridesByTargetRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) SetBody(v *RemoveChainLocalOverridesByTargetRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverridesByTargetRequest_Body
+ f = new(RemoveChainLocalOverridesByTargetRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetResponse struct {
+ Body *RemoveChainLocalOverridesByTargetResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *RemoveChainLocalOverridesByTargetResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size in bytes of the signed data of x.
+//
+// Structures with the same field values have the same signed data size.
+func (x *RemoveChainLocalOverridesByTargetResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *RemoveChainLocalOverridesByTargetResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveChainLocalOverridesByTargetResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverridesByTargetResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) GetBody() *RemoveChainLocalOverridesByTargetResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) SetBody(v *RemoveChainLocalOverridesByTargetResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverridesByTargetResponse_Body
+ f = new(RemoveChainLocalOverridesByTargetResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Async bool `json:"async"`
+ RestoreMode bool `json:"restoreMode"`
+ Shrink bool `json:"shrink"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest_Body)(nil)
+ _ json.Marshaler = (*SealWriteCacheRequest_Body)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *SealWriteCacheRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ size += proto.BoolSize(2, x.IgnoreErrors)
+ size += proto.BoolSize(3, x.Async)
+ size += proto.BoolSize(4, x.RestoreMode)
+ size += proto.BoolSize(5, x.Shrink)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *SealWriteCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.IgnoreErrors {
+ mm.AppendBool(2, x.IgnoreErrors)
+ }
+ if x.Async {
+ mm.AppendBool(3, x.Async)
+ }
+ if x.RestoreMode {
+ mm.AppendBool(4, x.RestoreMode)
+ }
+ if x.Shrink {
+ mm.AppendBool(5, x.Shrink)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // IgnoreErrors
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
+ }
+ x.IgnoreErrors = data
+ case 3: // Async
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Async")
+ }
+ x.Async = data
+ case 4: // RestoreMode
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RestoreMode")
+ }
+ x.RestoreMode = data
+ case 5: // Shrink
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shrink")
+ }
+ x.Shrink = data
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetIgnoreErrors(v bool) {
+ x.IgnoreErrors = v
+}
+func (x *SealWriteCacheRequest_Body) GetAsync() bool {
+ if x != nil {
+ return x.Async
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetAsync(v bool) {
+ x.Async = v
+}
+func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool {
+ if x != nil {
+ return x.RestoreMode
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetRestoreMode(v bool) {
+ x.RestoreMode = v
+}
+func (x *SealWriteCacheRequest_Body) GetShrink() bool {
+ if x != nil {
+ return x.Shrink
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetShrink(v bool) {
+ x.Shrink = v
+}
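+
+// exampleBodyProtoRoundTrip is an illustrative sketch, not generated code:
+// MarshalProtobuf appends the stable encoding to the destination buffer via a
+// pooled marshaler, and UnmarshalProtobuf restores an equivalent body.
+// StableSize presizes the destination buffer exactly.
+func exampleBodyProtoRoundTrip(in *SealWriteCacheRequest_Body) (*SealWriteCacheRequest_Body, error) {
+ data := in.MarshalProtobuf(make([]byte, 0, in.StableSize()))
+ out := new(SealWriteCacheRequest_Body)
+ if err := out.UnmarshalProtobuf(data); err != nil {
+ return nil, fmt.Errorf("round trip: %w", err)
+ }
+ return out, nil
+}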
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ignoreErrors\":"
+ out.RawString(prefix)
+ out.Bool(x.IgnoreErrors)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"async\":"
+ out.RawString(prefix)
+ out.Bool(x.Async)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"restoreMode\":"
+ out.RawString(prefix)
+ out.Bool(x.RestoreMode)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shrink\":"
+ out.RawString(prefix)
+ out.Bool(x.Shrink)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "ignoreErrors":
+ {
+ var f bool
+ f = in.Bool()
+ x.IgnoreErrors = f
+ }
+ case "async":
+ {
+ var f bool
+ f = in.Bool()
+ x.Async = f
+ }
+ case "restoreMode":
+ {
+ var f bool
+ f = in.Bool()
+ x.RestoreMode = f
+ }
+ case "shrink":
+ {
+ var f bool
+ f = in.Bool()
+ x.Shrink = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
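+
+// exampleShardIDJSON is an illustrative sketch, not generated code: shard IDs
+// are emitted as base64 strings in JSON (e.g. {0x01, 0x02} becomes "AQI="), and
+// UnmarshalJSON decodes them back to raw bytes.
+func exampleShardIDJSON() error {
+ in := &SealWriteCacheRequest_Body{Shard_ID: [][]byte{{0x01, 0x02}}, Shrink: true}
+ data, err := in.MarshalJSON() // {"shardID":["AQI="],...,"shrink":true}
+ if err != nil {
+ return err
+ }
+ out := new(SealWriteCacheRequest_Body)
+ return out.UnmarshalJSON(data)
+}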
+
+type SealWriteCacheRequest struct {
+ Body *SealWriteCacheRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest)(nil)
+ _ json.Marshaler = (*SealWriteCacheRequest)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *SealWriteCacheRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size in bytes of the signed data of x.
+//
+// Structures with the same field values have the same signed data size.
+func (x *SealWriteCacheRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *SealWriteCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *SealWriteCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SealWriteCacheRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest) GetBody() *SealWriteCacheRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest) SetBody(v *SealWriteCacheRequest_Body) {
+ x.Body = v
+}
+func (x *SealWriteCacheRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SealWriteCacheRequest_Body
+ f = new(SealWriteCacheRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheResponse_Body_Status struct {
+ Shard_ID []byte `json:"shardID"`
+ Success bool `json:"success"`
+ Error string `json:"error"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+ _ json.Marshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *SealWriteCacheResponse_Body_Status) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.Shard_ID)
+ size += proto.BoolSize(2, x.Success)
+ size += proto.StringSize(3, x.Error)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *SealWriteCacheResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Shard_ID) != 0 {
+ mm.AppendBytes(1, x.Shard_ID)
+ }
+ if x.Success {
+ mm.AppendBool(2, x.Success)
+ }
+ if len(x.Error) != 0 {
+ mm.AppendString(3, x.Error)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body_Status")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = data
+ case 2: // Success
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Success")
+ }
+ x.Success = data
+ case 3: // Error
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Error")
+ }
+ x.Error = data
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body_Status) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body_Status) SetShard_ID(v []byte) {
+ x.Shard_ID = v
+}
+func (x *SealWriteCacheResponse_Body_Status) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+func (x *SealWriteCacheResponse_Body_Status) SetSuccess(v bool) {
+ x.Success = v
+}
+func (x *SealWriteCacheResponse_Body_Status) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+func (x *SealWriteCacheResponse_Body_Status) SetError(v string) {
+ x.Error = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ if x.Shard_ID != nil {
+ out.Base64Bytes(x.Shard_ID)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"success\":"
+ out.RawString(prefix)
+ out.Bool(x.Success)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"error\":"
+ out.RawString(prefix)
+ out.String(x.Error)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Shard_ID = f
+ }
+ case "success":
+ {
+ var f bool
+ f = in.Bool()
+ x.Success = f
+ }
+ case "error":
+ {
+ var f string
+ f = in.String()
+ x.Error = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheResponse_Body struct {
+ Results []SealWriteCacheResponse_Body_Status `json:"results"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body)(nil)
+ _ json.Marshaler = (*SealWriteCacheResponse_Body)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *SealWriteCacheResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ for i := range x.Results {
+ size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Results {
+ x.Results[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Results
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Results")
+ }
+ x.Results = append(x.Results, SealWriteCacheResponse_Body_Status{})
+ ff := &x.Results[len(x.Results)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body) GetResults() []SealWriteCacheResponse_Body_Status {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body) SetResults(v []SealWriteCacheResponse_Body_Status) {
+ x.Results = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"results\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Results {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Results[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "results":
+ {
+ var f SealWriteCacheResponse_Body_Status
+ var list []SealWriteCacheResponse_Body_Status
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = SealWriteCacheResponse_Body_Status{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Results = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
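+
+// exampleCollectFailedShards is an illustrative sketch, not generated code:
+// Results carries one status per shard, so callers typically gather the shard
+// IDs whose seal failed together with the reported error text.
+func exampleCollectFailedShards(resp *SealWriteCacheResponse) map[string]string {
+ failed := make(map[string]string)
+ for _, st := range resp.GetBody().GetResults() {
+ if !st.GetSuccess() {
+ failed[string(st.GetShard_ID())] = st.GetError()
+ }
+ }
+ return failed
+}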
+
+type SealWriteCacheResponse struct {
+ Body *SealWriteCacheResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse)(nil)
+ _ json.Marshaler = (*SealWriteCacheResponse)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *SealWriteCacheResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size in bytes of the signed data of x.
+//
+// Structures with the same field values have the same signed data size.
+func (x *SealWriteCacheResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *SealWriteCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *SealWriteCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SealWriteCacheResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse) GetBody() *SealWriteCacheResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse) SetBody(v *SealWriteCacheResponse_Body) {
+ x.Body = v
+}
+func (x *SealWriteCacheResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SealWriteCacheResponse_Body
+ f = new(SealWriteCacheResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DetachShardsRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsRequest_Body)(nil)
+ _ json.Marshaler = (*DetachShardsRequest_Body)(nil)
+ _ json.Unmarshaler = (*DetachShardsRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DetachShardsRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DetachShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ }
+ }
+ return nil
+}
+func (x *DetachShardsRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *DetachShardsRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
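+
+// exampleDetachShardsRequest is an illustrative sketch, not generated code: a
+// detach request carries only the raw shard IDs; the signature is attached
+// separately, e.g. via a signed-data flow as sketched above.
+func exampleDetachShardsRequest(shardIDs [][]byte) *DetachShardsRequest {
+ body := new(DetachShardsRequest_Body)
+ body.SetShard_ID(shardIDs)
+ req := new(DetachShardsRequest)
+ req.SetBody(body)
+ return req
+}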
+
+type DetachShardsRequest struct {
+ Body *DetachShardsRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsRequest)(nil)
+ _ json.Marshaler = (*DetachShardsRequest)(nil)
+ _ json.Unmarshaler = (*DetachShardsRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DetachShardsRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size in bytes of the signed data of x.
+//
+// Structures with the same field values have the same signed data size.
+func (x *DetachShardsRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *DetachShardsRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DetachShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DetachShardsRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DetachShardsRequest) GetBody() *DetachShardsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DetachShardsRequest) SetBody(v *DetachShardsRequest_Body) {
+ x.Body = v
+}
+func (x *DetachShardsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DetachShardsRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DetachShardsRequest_Body
+ f = new(DetachShardsRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DetachShardsResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsResponse_Body)(nil)
+ _ json.Marshaler = (*DetachShardsResponse_Body)(nil)
+ _ json.Unmarshaler = (*DetachShardsResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DetachShardsResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DetachShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DetachShardsResponse struct {
+ Body *DetachShardsResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsResponse)(nil)
+ _ json.Marshaler = (*DetachShardsResponse)(nil)
+ _ json.Unmarshaler = (*DetachShardsResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DetachShardsResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *DetachShardsResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *DetachShardsResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DetachShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DetachShardsResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DetachShardsResponse) GetBody() *DetachShardsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DetachShardsResponse) SetBody(v *DetachShardsResponse_Body) {
+ x.Body = v
+}
+func (x *DetachShardsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DetachShardsResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DetachShardsResponse_Body
+ f = new(DetachShardsResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
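+// NOTE (editor's sketch, not generator output; a helper like this would
+// normally live outside this DO NOT EDIT file): a binary and JSON round-trip
+// through the generated marshalers. MarshalProtobuf appends to its argument,
+// so passing nil lets the pooled marshaler allocate a fresh buffer.
+func exampleDetachShardsRoundTrip() error {
+ resp := new(DetachShardsResponse)
+ resp.SetBody(new(DetachShardsResponse_Body))
+ resp.SetSignature(new(Signature))
+ // Binary round-trip via the easyproto-based marshaler.
+ buf := resp.MarshalProtobuf(nil)
+ var parsed DetachShardsResponse
+ if err := parsed.UnmarshalProtobuf(buf); err != nil {
+ return err
+ }
+ // JSON round-trip via the easyjson-based marshaler.
+ js, err := parsed.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ return parsed.UnmarshalJSON(js)
+}
+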
+type StartShardRebuildRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ TargetFillPercent uint32 `json:"targetFillPercent"`
+ ConcurrencyLimit uint32 `json:"concurrencyLimit"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ json.Marshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ size += proto.UInt32Size(2, x.TargetFillPercent)
+ size += proto.UInt32Size(3, x.ConcurrencyLimit)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.TargetFillPercent != 0 {
+ mm.AppendUint32(2, x.TargetFillPercent)
+ }
+ if x.ConcurrencyLimit != 0 {
+ mm.AppendUint32(3, x.ConcurrencyLimit)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // TargetFillPercent
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TargetFillPercent")
+ }
+ x.TargetFillPercent = data
+ case 3: // ConcurrencyLimit
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ConcurrencyLimit")
+ }
+ x.ConcurrencyLimit = data
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardRebuildRequest_Body) GetTargetFillPercent() uint32 {
+ if x != nil {
+ return x.TargetFillPercent
+ }
+ return 0
+}
+func (x *StartShardRebuildRequest_Body) SetTargetFillPercent(v uint32) {
+ x.TargetFillPercent = v
+}
+func (x *StartShardRebuildRequest_Body) GetConcurrencyLimit() uint32 {
+ if x != nil {
+ return x.ConcurrencyLimit
+ }
+ return 0
+}
+func (x *StartShardRebuildRequest_Body) SetConcurrencyLimit(v uint32) {
+ x.ConcurrencyLimit = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"targetFillPercent\":"
+ out.RawString(prefix)
+ out.Uint32(x.TargetFillPercent)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"concurrencyLimit\":"
+ out.RawString(prefix)
+ out.Uint32(x.ConcurrencyLimit)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "targetFillPercent":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.TargetFillPercent = f
+ }
+ case "concurrencyLimit":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.ConcurrencyLimit = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
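+// NOTE (editor's sketch, not generator output): the JSON form of a body uses
+// base64 for byte slices and plain numbers for uint32 fields, so the value
+// built below marshals to, roughly,
+// {"shardID":["AQ=="],"targetFillPercent":80,"concurrencyLimit":4}.
+func exampleRebuildBodyJSON() ([]byte, error) {
+ body := &StartShardRebuildRequest_Body{
+ Shard_ID: [][]byte{{0x01}},
+ TargetFillPercent: 80,
+ ConcurrencyLimit: 4,
+ }
+ return body.MarshalJSON()
+}
+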
+type StartShardRebuildRequest struct {
+ Body *StartShardRebuildRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest)(nil)
+ _ json.Marshaler = (*StartShardRebuildRequest)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardRebuildRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardRebuildRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardRebuildRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) GetBody() *StartShardRebuildRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) SetBody(v *StartShardRebuildRequest_Body) {
+ x.Body = v
+}
+func (x *StartShardRebuildRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardRebuildRequest_Body
+ f = new(StartShardRebuildRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
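+// NOTE (editor's sketch, not generator output): assembling a rebuild request
+// with the generated setters; the shard ID below is a placeholder (real IDs
+// would come from a shard listing). Zero values are omitted on the wire,
+// since EmitProtobuf only appends TargetFillPercent and ConcurrencyLimit
+// when they are non-zero.
+func exampleStartShardRebuild() []byte {
+ body := new(StartShardRebuildRequest_Body)
+ body.SetShard_ID([][]byte{[]byte("placeholder-shard-id")})
+ body.SetTargetFillPercent(80)
+ body.SetConcurrencyLimit(4)
+ req := new(StartShardRebuildRequest)
+ req.SetBody(body)
+ return req.MarshalProtobuf(nil) // nil lets the pooled marshaler allocate
+}
+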
+type StartShardRebuildResponse_Body_Status struct {
+ Shard_ID []byte `json:"shardID"`
+ Success bool `json:"success"`
+ Error string `json:"error"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse_Body_Status) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.Shard_ID)
+ size += proto.BoolSize(2, x.Success)
+ size += proto.StringSize(3, x.Error)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Shard_ID) != 0 {
+ mm.AppendBytes(1, x.Shard_ID)
+ }
+ if x.Success {
+ mm.AppendBool(2, x.Success)
+ }
+ if len(x.Error) != 0 {
+ mm.AppendString(3, x.Error)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body_Status")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = data
+ case 2: // Success
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Success")
+ }
+ x.Success = data
+ case 3: // Error
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Error")
+ }
+ x.Error = data
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body_Status) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body_Status) SetShard_ID(v []byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardRebuildResponse_Body_Status) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+func (x *StartShardRebuildResponse_Body_Status) SetSuccess(v bool) {
+ x.Success = v
+}
+func (x *StartShardRebuildResponse_Body_Status) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+func (x *StartShardRebuildResponse_Body_Status) SetError(v string) {
+ x.Error = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ if x.Shard_ID != nil {
+ out.Base64Bytes(x.Shard_ID)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"success\":"
+ out.RawString(prefix)
+ out.Bool(x.Success)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"error\":"
+ out.RawString(prefix)
+ out.String(x.Error)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Shard_ID = f
+ }
+ case "success":
+ {
+ var f bool
+ f = in.Bool()
+ x.Success = f
+ }
+ case "error":
+ {
+ var f string
+ f = in.String()
+ x.Error = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse_Body struct {
+ Results []StartShardRebuildResponse_Body_Status `json:"results"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ for i := range x.Results {
+ size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Results {
+ x.Results[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Results
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Results")
+ }
+ x.Results = append(x.Results, StartShardRebuildResponse_Body_Status{})
+ ff := &x.Results[len(x.Results)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body) GetResults() []StartShardRebuildResponse_Body_Status {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body) SetResults(v []StartShardRebuildResponse_Body_Status) {
+ x.Results = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"results\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Results {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Results[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "results":
+ {
+ var f StartShardRebuildResponse_Body_Status
+ var list []StartShardRebuildResponse_Body_Status
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = StartShardRebuildResponse_Body_Status{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Results = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse struct {
+ Body *StartShardRebuildResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardRebuildResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardRebuildResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardRebuildResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) GetBody() *StartShardRebuildResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) SetBody(v *StartShardRebuildResponse_Body) {
+ x.Body = v
+}
+func (x *StartShardRebuildResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardRebuildResponse_Body
+ f = new(StartShardRebuildResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
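+// NOTE (editor's sketch, not generator output): collecting failed shards from
+// a rebuild response. Results holds values rather than pointers, matching how
+// UnmarshalProtobuf appends decoded statuses to the slice in place; the
+// nil-checked getters make this safe even on an empty response.
+func exampleFailedRebuilds(resp *StartShardRebuildResponse) [][]byte {
+ var failed [][]byte
+ for _, st := range resp.GetBody().GetResults() {
+ if !st.GetSuccess() {
+ failed = append(failed, st.GetShard_ID())
+ }
+ }
+ return failed
+}
+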
+type ListShardsForObjectRequest_Body struct {
+ ObjectId string `json:"objectId"`
+ ContainerId string `json:"containerId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.StringSize(1, x.ObjectId)
+ size += proto.StringSize(2, x.ContainerId)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ObjectId) != 0 {
+ mm.AppendString(1, x.ObjectId)
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendString(2, x.ContainerId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ObjectId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ObjectId")
+ }
+ x.ObjectId = data
+ case 2: // ContainerId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest_Body) GetObjectId() string {
+ if x != nil {
+ return x.ObjectId
+ }
+ return ""
+}
+func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) {
+ x.ObjectId = v
+}
+func (x *ListShardsForObjectRequest_Body) GetContainerId() string {
+ if x != nil {
+ return x.ContainerId
+ }
+ return ""
+}
+func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) {
+ x.ContainerId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"objectId\":"
+ out.RawString(prefix)
+ out.String(x.ObjectId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ out.String(x.ContainerId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "objectId":
+ {
+ var f string
+ f = in.String()
+ x.ObjectId = f
+ }
+ case "containerId":
+ {
+ var f string
+ f = in.String()
+ x.ContainerId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsForObjectRequest struct {
+ Body *ListShardsForObjectRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil)
+ _ json.Marshaler = (*ListShardsForObjectRequest)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListShardsForObjectRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsForObjectRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) {
+ x.Body = v
+}
+func (x *ListShardsForObjectRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsForObjectRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsForObjectRequest_Body
+ f = new(ListShardsForObjectRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
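+// NOTE (editor's sketch, not generator output): the signing hooks marshal
+// only the body, so requests with equal bodies yield identical signed data
+// regardless of what Signature holds. SignedDataSize pre-sizes the buffer
+// that ReadSignedData then fills by appending.
+func exampleSignedData(req *ListShardsForObjectRequest) ([]byte, error) {
+ buf := make([]byte, 0, req.SignedDataSize())
+ return req.ReadSignedData(buf)
+}
+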
+type ListShardsForObjectResponse_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ if x.Shard_ID[i] != nil {
+ out.Base64Bytes(x.Shard_ID[i])
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
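+// NOTE (editor's sketch, not generator output): every generated getter checks
+// its receiver, so chained access on a nil response (the type declared just
+// below) degrades to zero values instead of panicking.
+func exampleNilSafeGetters() [][]byte {
+ var resp *ListShardsForObjectResponse
+ return resp.GetBody().GetShard_ID() // nil, no panic
+}
+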
+type ListShardsForObjectResponse struct {
+ Body *ListShardsForObjectResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
+ _ json.Marshaler = (*ListShardsForObjectResponse)(nil)
+ _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns the size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *ListShardsForObjectResponse) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with the signed data of x.
+// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsForObjectResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) {
+ x.Body = v
+}
+func (x *ListShardsForObjectResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsForObjectResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsForObjectResponse_Body
+ f = new(ListShardsForObjectResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index 2cfddd7f57..045662ccfa 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.21.12
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.25.0
// source: pkg/services/control/service.proto
package control
@@ -18,6 +18,32 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
+const (
+ ControlService_HealthCheck_FullMethodName = "/control.ControlService/HealthCheck"
+ ControlService_SetNetmapStatus_FullMethodName = "/control.ControlService/SetNetmapStatus"
+ ControlService_GetNetmapStatus_FullMethodName = "/control.ControlService/GetNetmapStatus"
+ ControlService_DropObjects_FullMethodName = "/control.ControlService/DropObjects"
+ ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
+ ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
+ ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
+ ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation"
+ ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus"
+ ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus"
+ ControlService_StopShardEvacuation_FullMethodName = "/control.ControlService/StopShardEvacuation"
+ ControlService_FlushCache_FullMethodName = "/control.ControlService/FlushCache"
+ ControlService_Doctor_FullMethodName = "/control.ControlService/Doctor"
+ ControlService_AddChainLocalOverride_FullMethodName = "/control.ControlService/AddChainLocalOverride"
+ ControlService_GetChainLocalOverride_FullMethodName = "/control.ControlService/GetChainLocalOverride"
+ ControlService_ListChainLocalOverrides_FullMethodName = "/control.ControlService/ListChainLocalOverrides"
+ ControlService_RemoveChainLocalOverride_FullMethodName = "/control.ControlService/RemoveChainLocalOverride"
+ ControlService_RemoveChainLocalOverridesByTarget_FullMethodName = "/control.ControlService/RemoveChainLocalOverridesByTarget"
+ ControlService_ListTargetsLocalOverrides_FullMethodName = "/control.ControlService/ListTargetsLocalOverrides"
+ ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
+ ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
+ ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
+ ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject"
+)
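+
+// NOTE (editor's sketch, not generator output): these exported constants let
+// callers and interceptors refer to RPCs without hard-coded strings, e.g.:
+//
+// func audit(ctx context.Context, method string, req, reply any,
+// cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+// if method == ControlService_DetachShards_FullMethodName {
+// // extra auditing for shard-detach calls could run here
+// }
+// return invoker(ctx, method, req, reply, cc, opts...)
+// }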
+
// ControlServiceClient is the client API for ControlService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@@ -26,22 +52,52 @@ type ControlServiceClient interface {
HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
// Sets status of the storage node in FrostFS network map.
SetNetmapStatus(ctx context.Context, in *SetNetmapStatusRequest, opts ...grpc.CallOption) (*SetNetmapStatusResponse, error)
+ // Gets status of the storage node in FrostFS network map.
+ GetNetmapStatus(ctx context.Context, in *GetNetmapStatusRequest, opts ...grpc.CallOption) (*GetNetmapStatusResponse, error)
// Mark objects to be removed from node's local object storage.
DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error)
// Returns list that contains information about all shards of a node.
ListShards(ctx context.Context, in *ListShardsRequest, opts ...grpc.CallOption) (*ListShardsResponse, error)
// Sets mode of the shard.
SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error)
- // Dump objects from the shard.
- DumpShard(ctx context.Context, in *DumpShardRequest, opts ...grpc.CallOption) (*DumpShardResponse, error)
- // Restore objects from dump.
- RestoreShard(ctx context.Context, in *RestoreShardRequest, opts ...grpc.CallOption) (*RestoreShardResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error)
- // EvacuateShard moves all data from one shard to the others.
- EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
+ // StartShardEvacuation starts moving all data from one shard to the others.
+ StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error)
+ // GetShardEvacuationStatus returns evacuation status.
+ GetShardEvacuationStatus(ctx context.Context, in *GetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*GetShardEvacuationStatusResponse, error)
+ // ResetShardEvacuationStatus resets evacuation status if there is no running
+ // evacuation process.
+ ResetShardEvacuationStatus(ctx context.Context, in *ResetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*ResetShardEvacuationStatusResponse, error)
+ // StopShardEvacuation stops moving all data from one shard to the others.
+ StopShardEvacuation(ctx context.Context, in *StopShardEvacuationRequest, opts ...grpc.CallOption) (*StopShardEvacuationResponse, error)
// FlushCache flushes all write-cache data to the main storage.
FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error)
+ // Doctor performs storage restructuring operations on engine.
+ Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error)
+ // Add local access policy engine overrides to a node.
+ AddChainLocalOverride(ctx context.Context, in *AddChainLocalOverrideRequest, opts ...grpc.CallOption) (*AddChainLocalOverrideResponse, error)
+ // Get local access policy engine overrides stored in the node by chain id.
+ GetChainLocalOverride(ctx context.Context, in *GetChainLocalOverrideRequest, opts ...grpc.CallOption) (*GetChainLocalOverrideResponse, error)
+ // List local access policy engine overrides stored in the node by container
+ // id.
+ ListChainLocalOverrides(ctx context.Context, in *ListChainLocalOverridesRequest, opts ...grpc.CallOption) (*ListChainLocalOverridesResponse, error)
+ // Remove local access policy engine overrides stored in the node by chain
+ // id.
+ RemoveChainLocalOverride(ctx context.Context, in *RemoveChainLocalOverrideRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverrideResponse, error)
+ // Remove local access policy engine overrides stored in the node by the
+ // target they apply to.
+ RemoveChainLocalOverridesByTarget(ctx context.Context, in *RemoveChainLocalOverridesByTargetRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error)
+ // List targets of the local APE overrides stored in the node.
+ ListTargetsLocalOverrides(ctx context.Context, in *ListTargetsLocalOverridesRequest, opts ...grpc.CallOption) (*ListTargetsLocalOverridesResponse, error)
+ // Flush objects from the write-cache and move it to degraded read-only mode.
+ SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error)
+ // DetachShards detaches and closes shards.
+ DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
+ // StartShardRebuild starts shard rebuild process.
+ StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
+ // ListShardsForObject returns the list of shards where the object is stored.
+ ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error)
}
type controlServiceClient struct {
@@ -54,7 +110,7 @@ func NewControlServiceClient(cc grpc.ClientConnInterface) ControlServiceClient {
func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/HealthCheck", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_HealthCheck_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -63,7 +119,16 @@ func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckR
func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetmapStatusRequest, opts ...grpc.CallOption) (*SetNetmapStatusResponse, error) {
out := new(SetNetmapStatusResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/SetNetmapStatus", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_SetNetmapStatus_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) GetNetmapStatus(ctx context.Context, in *GetNetmapStatusRequest, opts ...grpc.CallOption) (*GetNetmapStatusResponse, error) {
+ out := new(GetNetmapStatusResponse)
+ err := c.cc.Invoke(ctx, ControlService_GetNetmapStatus_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -72,7 +137,7 @@ func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetma
func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error) {
out := new(DropObjectsResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/DropObjects", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_DropObjects_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -81,7 +146,7 @@ func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsR
func (c *controlServiceClient) ListShards(ctx context.Context, in *ListShardsRequest, opts ...grpc.CallOption) (*ListShardsResponse, error) {
out := new(ListShardsResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/ListShards", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_ListShards_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -90,25 +155,7 @@ func (c *controlServiceClient) ListShards(ctx context.Context, in *ListShardsReq
func (c *controlServiceClient) SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) {
out := new(SetShardModeResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/SetShardMode", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) DumpShard(ctx context.Context, in *DumpShardRequest, opts ...grpc.CallOption) (*DumpShardResponse, error) {
- out := new(DumpShardResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/DumpShard", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) RestoreShard(ctx context.Context, in *RestoreShardRequest, opts ...grpc.CallOption) (*RestoreShardResponse, error) {
- out := new(RestoreShardResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/RestoreShard", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_SetShardMode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -117,16 +164,43 @@ func (c *controlServiceClient) RestoreShard(ctx context.Context, in *RestoreShar
func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) {
out := new(SynchronizeTreeResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/SynchronizeTree", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_SynchronizeTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
- out := new(EvacuateShardResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/EvacuateShard", in, out, opts...)
+func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) {
+ out := new(StartShardEvacuationResponse)
+ err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) GetShardEvacuationStatus(ctx context.Context, in *GetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*GetShardEvacuationStatusResponse, error) {
+ out := new(GetShardEvacuationStatusResponse)
+ err := c.cc.Invoke(ctx, ControlService_GetShardEvacuationStatus_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) ResetShardEvacuationStatus(ctx context.Context, in *ResetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*ResetShardEvacuationStatusResponse, error) {
+ out := new(ResetShardEvacuationStatusResponse)
+ err := c.cc.Invoke(ctx, ControlService_ResetShardEvacuationStatus_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) StopShardEvacuation(ctx context.Context, in *StopShardEvacuationRequest, opts ...grpc.CallOption) (*StopShardEvacuationResponse, error) {
+ out := new(StopShardEvacuationResponse)
+ err := c.cc.Invoke(ctx, ControlService_StopShardEvacuation_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -135,7 +209,106 @@ func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateSh
func (c *controlServiceClient) FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error) {
out := new(FlushCacheResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/FlushCache", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_FlushCache_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error) {
+ out := new(DoctorResponse)
+ err := c.cc.Invoke(ctx, ControlService_Doctor_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) AddChainLocalOverride(ctx context.Context, in *AddChainLocalOverrideRequest, opts ...grpc.CallOption) (*AddChainLocalOverrideResponse, error) {
+ out := new(AddChainLocalOverrideResponse)
+ err := c.cc.Invoke(ctx, ControlService_AddChainLocalOverride_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) GetChainLocalOverride(ctx context.Context, in *GetChainLocalOverrideRequest, opts ...grpc.CallOption) (*GetChainLocalOverrideResponse, error) {
+ out := new(GetChainLocalOverrideResponse)
+ err := c.cc.Invoke(ctx, ControlService_GetChainLocalOverride_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) ListChainLocalOverrides(ctx context.Context, in *ListChainLocalOverridesRequest, opts ...grpc.CallOption) (*ListChainLocalOverridesResponse, error) {
+ out := new(ListChainLocalOverridesResponse)
+ err := c.cc.Invoke(ctx, ControlService_ListChainLocalOverrides_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) RemoveChainLocalOverride(ctx context.Context, in *RemoveChainLocalOverrideRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverrideResponse, error) {
+ out := new(RemoveChainLocalOverrideResponse)
+ err := c.cc.Invoke(ctx, ControlService_RemoveChainLocalOverride_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) RemoveChainLocalOverridesByTarget(ctx context.Context, in *RemoveChainLocalOverridesByTargetRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error) {
+ out := new(RemoveChainLocalOverridesByTargetResponse)
+ err := c.cc.Invoke(ctx, ControlService_RemoveChainLocalOverridesByTarget_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) ListTargetsLocalOverrides(ctx context.Context, in *ListTargetsLocalOverridesRequest, opts ...grpc.CallOption) (*ListTargetsLocalOverridesResponse, error) {
+ out := new(ListTargetsLocalOverridesResponse)
+ err := c.cc.Invoke(ctx, ControlService_ListTargetsLocalOverrides_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error) {
+ out := new(SealWriteCacheResponse)
+ err := c.cc.Invoke(ctx, ControlService_SealWriteCache_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) {
+ out := new(DetachShardsResponse)
+ err := c.cc.Invoke(ctx, ControlService_DetachShards_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) {
+ out := new(StartShardRebuildResponse)
+ err := c.cc.Invoke(ctx, ControlService_StartShardRebuild_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) {
+ out := new(ListShardsForObjectResponse)
+ err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -150,22 +323,52 @@ type ControlServiceServer interface {
HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
// Sets status of the storage node in FrostFS network map.
SetNetmapStatus(context.Context, *SetNetmapStatusRequest) (*SetNetmapStatusResponse, error)
+ // Gets status of the storage node in FrostFS network map.
+ GetNetmapStatus(context.Context, *GetNetmapStatusRequest) (*GetNetmapStatusResponse, error)
// Marks objects to be removed from the node's local object storage.
DropObjects(context.Context, *DropObjectsRequest) (*DropObjectsResponse, error)
// Returns a list with information about all shards of the node.
ListShards(context.Context, *ListShardsRequest) (*ListShardsResponse, error)
// Sets mode of the shard.
SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error)
- // Dump objects from the shard.
- DumpShard(context.Context, *DumpShardRequest) (*DumpShardResponse, error)
- // Restore objects from dump.
- RestoreShard(context.Context, *RestoreShardRequest) (*RestoreShardResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error)
- // EvacuateShard moves all data from one shard to the others.
- EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
+ // StartShardEvacuation starts moving all data from one shard to the others.
+ StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error)
+ // GetShardEvacuationStatus returns evacuation status.
+ GetShardEvacuationStatus(context.Context, *GetShardEvacuationStatusRequest) (*GetShardEvacuationStatusResponse, error)
+ // ResetShardEvacuationStatus resets evacuation status if there is no running
+ // evacuation process.
+ ResetShardEvacuationStatus(context.Context, *ResetShardEvacuationStatusRequest) (*ResetShardEvacuationStatusResponse, error)
+ // StopShardEvacuation stops moving all data from one shard to the others.
+ StopShardEvacuation(context.Context, *StopShardEvacuationRequest) (*StopShardEvacuationResponse, error)
// FlushCache flushes all objects from the write-cache to the main storage.
FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error)
+ // Doctor performs storage restructuring operations on the engine.
+ Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error)
+ // Add local access policy engine overrides to a node.
+ AddChainLocalOverride(context.Context, *AddChainLocalOverrideRequest) (*AddChainLocalOverrideResponse, error)
+ // Get local access policy engine overrides stored in the node by chain id.
+ GetChainLocalOverride(context.Context, *GetChainLocalOverrideRequest) (*GetChainLocalOverrideResponse, error)
+ // List local access policy engine overrides stored in the node by container
+ // id.
+ ListChainLocalOverrides(context.Context, *ListChainLocalOverridesRequest) (*ListChainLocalOverridesResponse, error)
+ // Remove local access policy engine overrides stored in the node by chain
+ // id.
+ RemoveChainLocalOverride(context.Context, *RemoveChainLocalOverrideRequest) (*RemoveChainLocalOverrideResponse, error)
+ // Remove local access policy engine overrides stored in the node by target.
+ RemoveChainLocalOverridesByTarget(context.Context, *RemoveChainLocalOverridesByTargetRequest) (*RemoveChainLocalOverridesByTargetResponse, error)
+ // List targets of the local APE overrides stored in the node.
+ ListTargetsLocalOverrides(context.Context, *ListTargetsLocalOverridesRequest) (*ListTargetsLocalOverridesResponse, error)
+ // Flush objects from the write-cache and move it to degraded read-only mode.
+ SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error)
+ // DetachShards detaches and closes shards.
+ DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
+ // StartShardRebuild starts shard rebuild process.
+ StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
+ // ListShardsForObject returns information about the shards where the object is stored.
+ ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -178,6 +381,9 @@ func (UnimplementedControlServiceServer) HealthCheck(context.Context, *HealthChe
func (UnimplementedControlServiceServer) SetNetmapStatus(context.Context, *SetNetmapStatusRequest) (*SetNetmapStatusResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetNetmapStatus not implemented")
}
+func (UnimplementedControlServiceServer) GetNetmapStatus(context.Context, *GetNetmapStatusRequest) (*GetNetmapStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNetmapStatus not implemented")
+}
func (UnimplementedControlServiceServer) DropObjects(context.Context, *DropObjectsRequest) (*DropObjectsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DropObjects not implemented")
}
@@ -187,21 +393,57 @@ func (UnimplementedControlServiceServer) ListShards(context.Context, *ListShards
func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetShardMode not implemented")
}
-func (UnimplementedControlServiceServer) DumpShard(context.Context, *DumpShardRequest) (*DumpShardResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DumpShard not implemented")
-}
-func (UnimplementedControlServiceServer) RestoreShard(context.Context, *RestoreShardRequest) (*RestoreShardResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RestoreShard not implemented")
-}
func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
}
-func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented")
+func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented")
+}
+func (UnimplementedControlServiceServer) GetShardEvacuationStatus(context.Context, *GetShardEvacuationStatusRequest) (*GetShardEvacuationStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetShardEvacuationStatus not implemented")
+}
+func (UnimplementedControlServiceServer) ResetShardEvacuationStatus(context.Context, *ResetShardEvacuationStatusRequest) (*ResetShardEvacuationStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ResetShardEvacuationStatus not implemented")
+}
+func (UnimplementedControlServiceServer) StopShardEvacuation(context.Context, *StopShardEvacuationRequest) (*StopShardEvacuationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method StopShardEvacuation not implemented")
}
func (UnimplementedControlServiceServer) FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FlushCache not implemented")
}
+func (UnimplementedControlServiceServer) Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Doctor not implemented")
+}
+func (UnimplementedControlServiceServer) AddChainLocalOverride(context.Context, *AddChainLocalOverrideRequest) (*AddChainLocalOverrideResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddChainLocalOverride not implemented")
+}
+func (UnimplementedControlServiceServer) GetChainLocalOverride(context.Context, *GetChainLocalOverrideRequest) (*GetChainLocalOverrideResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetChainLocalOverride not implemented")
+}
+func (UnimplementedControlServiceServer) ListChainLocalOverrides(context.Context, *ListChainLocalOverridesRequest) (*ListChainLocalOverridesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListChainLocalOverrides not implemented")
+}
+func (UnimplementedControlServiceServer) RemoveChainLocalOverride(context.Context, *RemoveChainLocalOverrideRequest) (*RemoveChainLocalOverrideResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RemoveChainLocalOverride not implemented")
+}
+func (UnimplementedControlServiceServer) RemoveChainLocalOverridesByTarget(context.Context, *RemoveChainLocalOverridesByTargetRequest) (*RemoveChainLocalOverridesByTargetResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RemoveChainLocalOverridesByTarget not implemented")
+}
+func (UnimplementedControlServiceServer) ListTargetsLocalOverrides(context.Context, *ListTargetsLocalOverridesRequest) (*ListTargetsLocalOverridesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListTargetsLocalOverrides not implemented")
+}
+func (UnimplementedControlServiceServer) SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SealWriteCache not implemented")
+}
+func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DetachShards not implemented")
+}
+func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
+}
+func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented")
+}
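// Illustrative only, not part of the generated code: a concrete server would
// typically embed UnimplementedControlServiceServer and override only the
// methods it supports, so the RPCs added above fail with codes.Unimplemented
// instead of breaking the build. A hypothetical minimal sketch:
//
//	type controlServer struct {
//		UnimplementedControlServiceServer
//	}
//
//	func (s *controlServer) GetNetmapStatus(ctx context.Context, req *GetNetmapStatusRequest) (*GetNetmapStatusResponse, error) {
//		// Return an empty response; a real implementation would fill the body.
//		return new(GetNetmapStatusResponse), nil
//	}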
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -224,7 +466,7 @@ func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/HealthCheck",
+ FullMethod: ControlService_HealthCheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).HealthCheck(ctx, req.(*HealthCheckRequest))
@@ -242,7 +484,7 @@ func _ControlService_SetNetmapStatus_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/SetNetmapStatus",
+ FullMethod: ControlService_SetNetmapStatus_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SetNetmapStatus(ctx, req.(*SetNetmapStatusRequest))
@@ -250,6 +492,24 @@ func _ControlService_SetNetmapStatus_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _ControlService_GetNetmapStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNetmapStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).GetNetmapStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_GetNetmapStatus_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).GetNetmapStatus(ctx, req.(*GetNetmapStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
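// Illustrative only: the interceptor consulted by the handlers above is the
// unary interceptor chain configured on the server, if any. A hypothetical
// logging interceptor that would observe every control RPC:
//
//	logging := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
//		handler grpc.UnaryHandler) (interface{}, error) {
//		// info.FullMethod carries the ControlService_*_FullMethodName set above.
//		log.Println("control RPC:", info.FullMethod)
//		return handler(ctx, req)
//	}
//	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(logging))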
func _ControlService_DropObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DropObjectsRequest)
if err := dec(in); err != nil {
@@ -260,7 +520,7 @@ func _ControlService_DropObjects_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/DropObjects",
+ FullMethod: ControlService_DropObjects_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).DropObjects(ctx, req.(*DropObjectsRequest))
@@ -278,7 +538,7 @@ func _ControlService_ListShards_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/ListShards",
+ FullMethod: ControlService_ListShards_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).ListShards(ctx, req.(*ListShardsRequest))
@@ -296,7 +556,7 @@ func _ControlService_SetShardMode_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/SetShardMode",
+ FullMethod: ControlService_SetShardMode_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SetShardMode(ctx, req.(*SetShardModeRequest))
@@ -304,42 +564,6 @@ func _ControlService_SetShardMode_Handler(srv interface{}, ctx context.Context,
return interceptor(ctx, in, info, handler)
}
-func _ControlService_DumpShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DumpShardRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).DumpShard(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/control.ControlService/DumpShard",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).DumpShard(ctx, req.(*DumpShardRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_RestoreShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RestoreShardRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).RestoreShard(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/control.ControlService/RestoreShard",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).RestoreShard(ctx, req.(*RestoreShardRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SynchronizeTreeRequest)
if err := dec(in); err != nil {
@@ -350,7 +574,7 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/SynchronizeTree",
+ FullMethod: ControlService_SynchronizeTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SynchronizeTree(ctx, req.(*SynchronizeTreeRequest))
@@ -358,20 +582,74 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
-func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(EvacuateShardRequest)
+func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StartShardEvacuationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(ControlServiceServer).EvacuateShard(ctx, in)
+ return srv.(ControlServiceServer).StartShardEvacuation(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/EvacuateShard",
+ FullMethod: ControlService_StartShardEvacuation_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
+ return srv.(ControlServiceServer).StartShardEvacuation(ctx, req.(*StartShardEvacuationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_GetShardEvacuationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetShardEvacuationStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).GetShardEvacuationStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_GetShardEvacuationStatus_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).GetShardEvacuationStatus(ctx, req.(*GetShardEvacuationStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_ResetShardEvacuationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ResetShardEvacuationStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).ResetShardEvacuationStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_ResetShardEvacuationStatus_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).ResetShardEvacuationStatus(ctx, req.(*ResetShardEvacuationStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_StopShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StopShardEvacuationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).StopShardEvacuation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_StopShardEvacuation_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).StopShardEvacuation(ctx, req.(*StopShardEvacuationRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -386,7 +664,7 @@ func _ControlService_FlushCache_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/FlushCache",
+ FullMethod: ControlService_FlushCache_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).FlushCache(ctx, req.(*FlushCacheRequest))
@@ -394,6 +672,204 @@ func _ControlService_FlushCache_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
+func _ControlService_Doctor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DoctorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).Doctor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_Doctor_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).Doctor(ctx, req.(*DoctorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_AddChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddChainLocalOverrideRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).AddChainLocalOverride(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_AddChainLocalOverride_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).AddChainLocalOverride(ctx, req.(*AddChainLocalOverrideRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_GetChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetChainLocalOverrideRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).GetChainLocalOverride(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_GetChainLocalOverride_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).GetChainLocalOverride(ctx, req.(*GetChainLocalOverrideRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_ListChainLocalOverrides_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListChainLocalOverridesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).ListChainLocalOverrides(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_ListChainLocalOverrides_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).ListChainLocalOverrides(ctx, req.(*ListChainLocalOverridesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_RemoveChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveChainLocalOverrideRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).RemoveChainLocalOverride(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_RemoveChainLocalOverride_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).RemoveChainLocalOverride(ctx, req.(*RemoveChainLocalOverrideRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_RemoveChainLocalOverridesByTarget_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RemoveChainLocalOverridesByTargetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).RemoveChainLocalOverridesByTarget(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_RemoveChainLocalOverridesByTarget_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).RemoveChainLocalOverridesByTarget(ctx, req.(*RemoveChainLocalOverridesByTargetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_ListTargetsLocalOverrides_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListTargetsLocalOverridesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).ListTargetsLocalOverrides(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_ListTargetsLocalOverrides_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).ListTargetsLocalOverrides(ctx, req.(*ListTargetsLocalOverridesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_SealWriteCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SealWriteCacheRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).SealWriteCache(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_SealWriteCache_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).SealWriteCache(ctx, req.(*SealWriteCacheRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_DetachShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DetachShardsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).DetachShards(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_DetachShards_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).DetachShards(ctx, req.(*DetachShardsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StartShardRebuildRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).StartShardRebuild(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_StartShardRebuild_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).StartShardRebuild(ctx, req.(*StartShardRebuildRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListShardsForObjectRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).ListShardsForObject(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_ListShardsForObject_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -409,6 +885,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetNetmapStatus",
Handler: _ControlService_SetNetmapStatus_Handler,
},
+ {
+ MethodName: "GetNetmapStatus",
+ Handler: _ControlService_GetNetmapStatus_Handler,
+ },
{
MethodName: "DropObjects",
Handler: _ControlService_DropObjects_Handler,
@@ -421,26 +901,74 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetShardMode",
Handler: _ControlService_SetShardMode_Handler,
},
- {
- MethodName: "DumpShard",
- Handler: _ControlService_DumpShard_Handler,
- },
- {
- MethodName: "RestoreShard",
- Handler: _ControlService_RestoreShard_Handler,
- },
{
MethodName: "SynchronizeTree",
Handler: _ControlService_SynchronizeTree_Handler,
},
{
- MethodName: "EvacuateShard",
- Handler: _ControlService_EvacuateShard_Handler,
+ MethodName: "StartShardEvacuation",
+ Handler: _ControlService_StartShardEvacuation_Handler,
+ },
+ {
+ MethodName: "GetShardEvacuationStatus",
+ Handler: _ControlService_GetShardEvacuationStatus_Handler,
+ },
+ {
+ MethodName: "ResetShardEvacuationStatus",
+ Handler: _ControlService_ResetShardEvacuationStatus_Handler,
+ },
+ {
+ MethodName: "StopShardEvacuation",
+ Handler: _ControlService_StopShardEvacuation_Handler,
},
{
MethodName: "FlushCache",
Handler: _ControlService_FlushCache_Handler,
},
+ {
+ MethodName: "Doctor",
+ Handler: _ControlService_Doctor_Handler,
+ },
+ {
+ MethodName: "AddChainLocalOverride",
+ Handler: _ControlService_AddChainLocalOverride_Handler,
+ },
+ {
+ MethodName: "GetChainLocalOverride",
+ Handler: _ControlService_GetChainLocalOverride_Handler,
+ },
+ {
+ MethodName: "ListChainLocalOverrides",
+ Handler: _ControlService_ListChainLocalOverrides_Handler,
+ },
+ {
+ MethodName: "RemoveChainLocalOverride",
+ Handler: _ControlService_RemoveChainLocalOverride_Handler,
+ },
+ {
+ MethodName: "RemoveChainLocalOverridesByTarget",
+ Handler: _ControlService_RemoveChainLocalOverridesByTarget_Handler,
+ },
+ {
+ MethodName: "ListTargetsLocalOverrides",
+ Handler: _ControlService_ListTargetsLocalOverrides_Handler,
+ },
+ {
+ MethodName: "SealWriteCache",
+ Handler: _ControlService_SealWriteCache_Handler,
+ },
+ {
+ MethodName: "DetachShards",
+ Handler: _ControlService_DetachShards_Handler,
+ },
+ {
+ MethodName: "StartShardRebuild",
+ Handler: _ControlService_StartShardRebuild_Handler,
+ },
+ {
+ MethodName: "ListShardsForObject",
+ Handler: _ControlService_ListShardsForObject_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
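// Illustrative only: this descriptor is what the generated
// RegisterControlServiceServer helper passes to grpc.RegisterService, and
// registering it manually is equivalent. A hypothetical sketch, reusing the
// controlServer type from the earlier sketch (listen address assumed):
//
//	lis, err := net.Listen("tcp", "127.0.0.1:8090")
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv := grpc.NewServer()
//	srv.RegisterService(&ControlService_ServiceDesc, &controlServer{})
//	log.Fatal(srv.Serve(lis))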
diff --git a/pkg/services/control/service_test.go b/pkg/services/control/service_test.go
deleted file mode 100644
index f39a304f9d..0000000000
--- a/pkg/services/control/service_test.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package control_test
-
-import (
- "bytes"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-)
-
-func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateHealthCheckResponseBody(),
- new(control.HealthCheckResponse_Body),
- func(m1, m2 protoMessage) bool {
- return equalHealthCheckResponseBodies(
- m1.(*control.HealthCheckResponse_Body),
- m2.(*control.HealthCheckResponse_Body),
- )
- },
- )
-}
-
-func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body {
- body := new(control.HealthCheckResponse_Body)
- body.SetNetmapStatus(control.NetmapStatus_ONLINE)
- body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN)
-
- return body
-}
-
-func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool {
- return b1.GetNetmapStatus() == b2.GetNetmapStatus() &&
- b1.GetHealthStatus() == b2.GetHealthStatus()
-}
-
-func TestSetNetmapStatusRequest_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateSetNetmapStatusRequestBody(),
- new(control.SetNetmapStatusRequest_Body),
- func(m1, m2 protoMessage) bool {
- return equalSetnetmapStatusRequestBodies(
- m1.(*control.SetNetmapStatusRequest_Body),
- m2.(*control.SetNetmapStatusRequest_Body),
- )
- },
- )
-}
-
-func generateSetNetmapStatusRequestBody() *control.SetNetmapStatusRequest_Body {
- body := new(control.SetNetmapStatusRequest_Body)
- body.SetStatus(control.NetmapStatus_ONLINE)
-
- return body
-}
-
-func equalSetnetmapStatusRequestBodies(b1, b2 *control.SetNetmapStatusRequest_Body) bool {
- return b1.GetStatus() == b2.GetStatus()
-}
-
-func TestListShardsResponse_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateListShardsResponseBody(),
- new(control.ListShardsResponse_Body),
- func(m1, m2 protoMessage) bool {
- return equalListShardResponseBodies(
- m1.(*control.ListShardsResponse_Body),
- m2.(*control.ListShardsResponse_Body),
- )
- },
- )
-}
-
-func equalListShardResponseBodies(b1, b2 *control.ListShardsResponse_Body) bool {
- if len(b1.Shards) != len(b2.Shards) {
- return false
- }
-
- for i := range b1.Shards {
- if b1.Shards[i].GetMetabasePath() != b2.Shards[i].GetMetabasePath() ||
- b1.Shards[i].GetWritecachePath() != b2.Shards[i].GetWritecachePath() ||
- b1.Shards[i].GetPiloramaPath() != b2.Shards[i].GetPiloramaPath() ||
- !bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[i].GetShard_ID()) {
- return false
- }
-
- info1 := b1.Shards[i].GetBlobstor()
- info2 := b2.Shards[i].GetBlobstor()
- return compareBlobstorInfo(info1, info2)
- }
-
- for i := range b1.Shards {
- for j := i + 1; j < len(b1.Shards); j++ {
- if b1.Shards[i].GetMetabasePath() == b2.Shards[j].GetMetabasePath() ||
- !compareBlobstorInfo(b1.Shards[i].Blobstor, b2.Shards[i].Blobstor) ||
- b1.Shards[i].GetWritecachePath() == b2.Shards[j].GetWritecachePath() ||
- bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[j].GetShard_ID()) {
- return false
- }
- }
- }
-
- return true
-}
-func compareBlobstorInfo(a, b []*control.BlobstorInfo) bool {
- if len(a) != len(b) {
- return false
- }
- for i := range a {
- if a[i].Type != b[i].Type ||
- a[i].Path != b[i].Path {
- return false
- }
- }
- return true
-}
-
-func generateListShardsResponseBody() *control.ListShardsResponse_Body {
- body := new(control.ListShardsResponse_Body)
- body.SetShards([]*control.ShardInfo{
- generateShardInfo(0),
- generateShardInfo(1),
- })
-
- return body
-}
-
-func TestSetShardModeRequest_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateSetShardModeRequestBody(),
- new(control.SetShardModeRequest_Body),
- func(m1, m2 protoMessage) bool {
- return equalSetShardModeRequestBodies(
- m1.(*control.SetShardModeRequest_Body),
- m2.(*control.SetShardModeRequest_Body),
- )
- },
- )
-}
-
-func generateSetShardModeRequestBody() *control.SetShardModeRequest_Body {
- body := new(control.SetShardModeRequest_Body)
- body.SetShardIDList([][]byte{{0, 1, 2, 3, 4}})
- body.SetMode(control.ShardMode_READ_WRITE)
-
- return body
-}
-
-func equalSetShardModeRequestBodies(b1, b2 *control.SetShardModeRequest_Body) bool {
- if b1.GetMode() != b2.GetMode() || len(b1.Shard_ID) != len(b2.Shard_ID) {
- return false
- }
-
- for i := range b1.Shard_ID {
- if !bytes.Equal(b1.Shard_ID[i], b2.Shard_ID[i]) {
- return false
- }
- }
-
- return true
-}
-
-func TestSynchronizeTreeRequest_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- &control.SynchronizeTreeRequest_Body{
- ContainerId: []byte{1, 2, 3, 4, 5, 6, 7},
- TreeId: "someID",
- Height: 42,
- },
- new(control.SynchronizeTreeRequest_Body),
- func(m1, m2 protoMessage) bool {
- b1 := m1.(*control.SynchronizeTreeRequest_Body)
- b2 := m2.(*control.SynchronizeTreeRequest_Body)
- return bytes.Equal(b1.GetContainerId(), b2.GetContainerId()) &&
- b1.GetTreeId() == b2.GetTreeId() &&
- b1.GetHeight() == b2.GetHeight()
- },
- )
-}
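// Illustrative only: the stable-marshal round-trips removed above can be
// approximated with the standard protobuf API; whether equivalent tests were
// added elsewhere is not visible in this diff. A hypothetical sketch:
//
//	body := &control.HealthCheckResponse_Body{NetmapStatus: control.NetmapStatus_ONLINE}
//	data, err := proto.MarshalOptions{Deterministic: true}.Marshal(body)
//	require.NoError(t, err)
//	out := new(control.HealthCheckResponse_Body)
//	require.NoError(t, proto.Unmarshal(data, out))
//	require.True(t, proto.Equal(body, out))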
diff --git a/pkg/services/control/types.go b/pkg/services/control/types.go
deleted file mode 100644
index 94f681c557..0000000000
--- a/pkg/services/control/types.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package control
-
-import (
- "google.golang.org/protobuf/encoding/protojson"
-)
-
-// SetKey sets public key used for signing.
-func (x *Signature) SetKey(v []byte) {
- if x != nil {
- x.Key = v
- }
-}
-
-// SetSign sets binary signature.
-func (x *Signature) SetSign(v []byte) {
- if x != nil {
- x.Sign = v
- }
-}
-
-// SetKey sets key of the node attribute.
-func (x *NodeInfo_Attribute) SetKey(v string) {
- if x != nil {
- x.Key = v
- }
-}
-
-// SetValue sets value of the node attribute.
-func (x *NodeInfo_Attribute) SetValue(v string) {
- if x != nil {
- x.Value = v
- }
-}
-
-// SetParents sets parent keys.
-func (x *NodeInfo_Attribute) SetParents(v []string) {
- if x != nil {
- x.Parents = v
- }
-}
-
-// SetPublicKey sets public key of the FrostFS node in a binary format.
-func (x *NodeInfo) SetPublicKey(v []byte) {
- if x != nil {
- x.PublicKey = v
- }
-}
-
-// SetAddresses sets ways to connect to a node.
-func (x *NodeInfo) SetAddresses(v []string) {
- if x != nil {
- x.Addresses = v
- }
-}
-
-// SetAttributes sets attributes of the FrostFS Storage Node.
-func (x *NodeInfo) SetAttributes(v []*NodeInfo_Attribute) {
- if x != nil {
- x.Attributes = v
- }
-}
-
-// SetState sets state of the FrostFS node.
-func (x *NodeInfo) SetState(v NetmapStatus) {
- if x != nil {
- x.State = v
- }
-}
-
-// SetEpoch sets revision number of the network map.
-func (x *Netmap) SetEpoch(v uint64) {
- if x != nil {
- x.Epoch = v
- }
-}
-
-// SetNodes sets nodes presented in network.
-func (x *Netmap) SetNodes(v []*NodeInfo) {
- if x != nil {
- x.Nodes = v
- }
-}
-
-func (x *Netmap) MarshalJSON() ([]byte, error) {
- return protojson.MarshalOptions{
- EmitUnpopulated: true,
- }.Marshal(x)
-}
-
-// SetID sets identificator of the shard.
-func (x *ShardInfo) SetID(v []byte) {
- x.Shard_ID = v
-}
-
-// SetMetabasePath sets path to shard's metabase.
-func (x *ShardInfo) SetMetabasePath(v string) {
- x.MetabasePath = v
-}
-
-// SetWriteCachePath sets path to shard's write-cache.
-func (x *ShardInfo) SetWriteCachePath(v string) {
- x.WritecachePath = v
-}
-
-// SetPiloramaPath sets path to shard's pilorama.
-func (x *ShardInfo) SetPiloramaPath(v string) {
- x.PiloramaPath = v
-}
-
-// SetMode sets path to shard's work mode.
-func (x *ShardInfo) SetMode(v ShardMode) {
- x.Mode = v
-}
-
-// SetErrorCount sets shard's error counter.
-func (x *ShardInfo) SetErrorCount(count uint32) {
- x.ErrorCount = count
-}
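// Illustrative only: with these setters removed, callers would presumably
// assign the exported fields of the generated messages directly. A
// hypothetical sketch (paths and IDs assumed):
//
//	si := new(control.ShardInfo)
//	si.Shard_ID = []byte{0, 1, 2, 3}
//	si.MetabasePath = "/srv/frostfs/meta"
//	si.Mode = control.ShardMode_READ_WRITE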
diff --git a/pkg/services/control/types.pb.go b/pkg/services/control/types.pb.go
deleted file mode 100644
index 735517d3df..0000000000
--- a/pkg/services/control/types.pb.go
+++ /dev/null
@@ -1,871 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.21.12
-// source: pkg/services/control/types.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Status of the storage node in the FrostFS network map.
-type NetmapStatus int32
-
-const (
- // Undefined status, default value.
- NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0
- // Node is online.
- NetmapStatus_ONLINE NetmapStatus = 1
- // Node is offline.
- NetmapStatus_OFFLINE NetmapStatus = 2
- // Node is maintained by the owner.
- NetmapStatus_MAINTENANCE NetmapStatus = 3
-)
-
-// Enum value maps for NetmapStatus.
-var (
- NetmapStatus_name = map[int32]string{
- 0: "STATUS_UNDEFINED",
- 1: "ONLINE",
- 2: "OFFLINE",
- 3: "MAINTENANCE",
- }
- NetmapStatus_value = map[string]int32{
- "STATUS_UNDEFINED": 0,
- "ONLINE": 1,
- "OFFLINE": 2,
- "MAINTENANCE": 3,
- }
-)
-
-func (x NetmapStatus) Enum() *NetmapStatus {
- p := new(NetmapStatus)
- *p = x
- return p
-}
-
-func (x NetmapStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (NetmapStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_types_proto_enumTypes[0].Descriptor()
-}
-
-func (NetmapStatus) Type() protoreflect.EnumType {
- return &file_pkg_services_control_types_proto_enumTypes[0]
-}
-
-func (x NetmapStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use NetmapStatus.Descriptor instead.
-func (NetmapStatus) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0}
-}
-
-// Health status of the storage node application.
-type HealthStatus int32
-
-const (
- // Undefined status, default value.
- HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
- // Storage node application is starting.
- HealthStatus_STARTING HealthStatus = 1
- // Storage node application is started and serves all services.
- HealthStatus_READY HealthStatus = 2
- // Storage node application is shutting down.
- HealthStatus_SHUTTING_DOWN HealthStatus = 3
-)
-
-// Enum value maps for HealthStatus.
-var (
- HealthStatus_name = map[int32]string{
- 0: "HEALTH_STATUS_UNDEFINED",
- 1: "STARTING",
- 2: "READY",
- 3: "SHUTTING_DOWN",
- }
- HealthStatus_value = map[string]int32{
- "HEALTH_STATUS_UNDEFINED": 0,
- "STARTING": 1,
- "READY": 2,
- "SHUTTING_DOWN": 3,
- }
-)
-
-func (x HealthStatus) Enum() *HealthStatus {
- p := new(HealthStatus)
- *p = x
- return p
-}
-
-func (x HealthStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_types_proto_enumTypes[1].Descriptor()
-}
-
-func (HealthStatus) Type() protoreflect.EnumType {
- return &file_pkg_services_control_types_proto_enumTypes[1]
-}
-
-func (x HealthStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use HealthStatus.Descriptor instead.
-func (HealthStatus) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1}
-}
-
-// Work mode of the shard.
-type ShardMode int32
-
-const (
- // Undefined mode, default value.
- ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0
- // Read-write.
- ShardMode_READ_WRITE ShardMode = 1
- // Read-only.
- ShardMode_READ_ONLY ShardMode = 2
- // Degraded.
- ShardMode_DEGRADED ShardMode = 3
- // DegradedReadOnly.
- ShardMode_DEGRADED_READ_ONLY ShardMode = 4
-)
-
-// Enum value maps for ShardMode.
-var (
- ShardMode_name = map[int32]string{
- 0: "SHARD_MODE_UNDEFINED",
- 1: "READ_WRITE",
- 2: "READ_ONLY",
- 3: "DEGRADED",
- 4: "DEGRADED_READ_ONLY",
- }
- ShardMode_value = map[string]int32{
- "SHARD_MODE_UNDEFINED": 0,
- "READ_WRITE": 1,
- "READ_ONLY": 2,
- "DEGRADED": 3,
- "DEGRADED_READ_ONLY": 4,
- }
-)
-
-func (x ShardMode) Enum() *ShardMode {
- p := new(ShardMode)
- *p = x
- return p
-}
-
-func (x ShardMode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ShardMode) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_types_proto_enumTypes[2].Descriptor()
-}
-
-func (ShardMode) Type() protoreflect.EnumType {
- return &file_pkg_services_control_types_proto_enumTypes[2]
-}
-
-func (x ShardMode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ShardMode.Descriptor instead.
-func (ShardMode) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2}
-}
-
-// Signature of some message.
-type Signature struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Public key used for signing.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Binary signature.
- Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
-}
-
-func (x *Signature) Reset() {
- *x = Signature{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Signature) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Signature) ProtoMessage() {}
-
-func (x *Signature) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
-func (*Signature) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-
-// FrostFS node description.
-type NodeInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Public key of the FrostFS node in a binary format.
- PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
- // Ways to connect to a node.
- Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"`
- // Carries list of the FrostFS node attributes in a key-value form. Key name
- // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo
- // structures with duplicated attribute names or attributes with empty values
- // will be considered invalid.
- Attributes []*NodeInfo_Attribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // Carries state of the FrostFS node.
- State NetmapStatus `protobuf:"varint,4,opt,name=state,proto3,enum=control.NetmapStatus" json:"state,omitempty"`
-}
-
-func (x *NodeInfo) Reset() {
- *x = NodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NodeInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NodeInfo) ProtoMessage() {}
-
-func (x *NodeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
-func (*NodeInfo) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *NodeInfo) GetPublicKey() []byte {
- if x != nil {
- return x.PublicKey
- }
- return nil
-}
-
-func (x *NodeInfo) GetAddresses() []string {
- if x != nil {
- return x.Addresses
- }
- return nil
-}
-
-func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-
-func (x *NodeInfo) GetState() NetmapStatus {
- if x != nil {
- return x.State
- }
- return NetmapStatus_STATUS_UNDEFINED
-}
-
-// Network map structure.
-type Netmap struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Network map revision number.
- Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"`
- // Nodes presented in network.
- Nodes []*NodeInfo `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty"`
-}
-
-func (x *Netmap) Reset() {
- *x = Netmap{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Netmap) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Netmap) ProtoMessage() {}
-
-func (x *Netmap) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Netmap.ProtoReflect.Descriptor instead.
-func (*Netmap) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Netmap) GetEpoch() uint64 {
- if x != nil {
- return x.Epoch
- }
- return 0
-}
-
-func (x *Netmap) GetNodes() []*NodeInfo {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-
-// Shard description.
-type ShardInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Path to shard's metabase.
- MetabasePath string `protobuf:"bytes,2,opt,name=metabase_path,json=metabasePath,proto3" json:"metabase_path,omitempty"`
- // Shard's blobstor info.
- Blobstor []*BlobstorInfo `protobuf:"bytes,3,rep,name=blobstor,proto3" json:"blobstor,omitempty"`
- // Path to shard's write-cache, empty if disabled.
- WritecachePath string `protobuf:"bytes,4,opt,name=writecache_path,json=writecachePath,proto3" json:"writecache_path,omitempty"`
- // Work mode of the shard.
- Mode ShardMode `protobuf:"varint,5,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
- // Amount of errors occured.
- ErrorCount uint32 `protobuf:"varint,6,opt,name=errorCount,proto3" json:"errorCount,omitempty"`
- // Path to shard's pilorama storage.
- PiloramaPath string `protobuf:"bytes,7,opt,name=pilorama_path,json=piloramaPath,proto3" json:"pilorama_path,omitempty"`
-}
-
-func (x *ShardInfo) Reset() {
- *x = ShardInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ShardInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ShardInfo) ProtoMessage() {}
-
-func (x *ShardInfo) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ShardInfo.ProtoReflect.Descriptor instead.
-func (*ShardInfo) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *ShardInfo) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *ShardInfo) GetMetabasePath() string {
- if x != nil {
- return x.MetabasePath
- }
- return ""
-}
-
-func (x *ShardInfo) GetBlobstor() []*BlobstorInfo {
- if x != nil {
- return x.Blobstor
- }
- return nil
-}
-
-func (x *ShardInfo) GetWritecachePath() string {
- if x != nil {
- return x.WritecachePath
- }
- return ""
-}
-
-func (x *ShardInfo) GetMode() ShardMode {
- if x != nil {
- return x.Mode
- }
- return ShardMode_SHARD_MODE_UNDEFINED
-}
-
-func (x *ShardInfo) GetErrorCount() uint32 {
- if x != nil {
- return x.ErrorCount
- }
- return 0
-}
-
-func (x *ShardInfo) GetPiloramaPath() string {
- if x != nil {
- return x.PiloramaPath
- }
- return ""
-}
-
-// Blobstor component description.
-type BlobstorInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Path to the root.
- Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
- // Component type.
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
-}
-
-func (x *BlobstorInfo) Reset() {
- *x = BlobstorInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BlobstorInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BlobstorInfo) ProtoMessage() {}
-
-func (x *BlobstorInfo) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BlobstorInfo.ProtoReflect.Descriptor instead.
-func (*BlobstorInfo) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *BlobstorInfo) GetPath() string {
- if x != nil {
- return x.Path
- }
- return ""
-}
-
-func (x *BlobstorInfo) GetType() string {
- if x != nil {
- return x.Type
- }
- return ""
-}
-
-// Administrator-defined Attributes of the FrostFS Storage Node.
-//
-// `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8
-// string. Value can't be empty.
-//
-// Node's attributes are mostly used during Storage Policy evaluation to
-// calculate object's placement and find a set of nodes satisfying policy
-// requirements. There are some "well-known" node attributes common to all the
-// Storage Nodes in the network and used implicitly with default values if not
-// explicitly set:
-//
-// - Capacity \
-// Total available disk space in Gigabytes.
-// - Price \
-// Price in GAS tokens for storing one GB of data during one Epoch. In node
-// attributes it's a string presenting floating point number with comma or
-// point delimiter for decimal part. In the Network Map it will be saved as
-// 64-bit unsigned integer representing number of minimal token fractions.
-// - Subnet \
-// String ID of Node's storage subnet. There can be only one subnet served
-// by the Storage Node.
-// - Locode \
-// Node's geographic location in
-// [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html)
-// format approximated to the nearest point defined in standard.
-// - Country \
-// Country code in
-// [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
-// format. Calculated automatically from `Locode` attribute
-// - Region \
-// Country's administative subdivision where node is located. Calculated
-// automatically from `Locode` attribute based on `SubDiv` field. Presented
-// in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format.
-// - City \
-// City, town, village or rural area name where node is located written
-// without diacritics . Calculated automatically from `Locode` attribute.
-//
-// For detailed description of each well-known attribute please see the
-// corresponding section in FrostFS Technical specification.
-type NodeInfo_Attribute struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Key of the node attribute.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Value of the node attribute.
- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- // Parent keys, if any. For example for `City` it could be `Region` and
- // `Country`.
- Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"`
-}
-
-func (x *NodeInfo_Attribute) Reset() {
- *x = NodeInfo_Attribute{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NodeInfo_Attribute) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NodeInfo_Attribute) ProtoMessage() {}
-
-func (x *NodeInfo_Attribute) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NodeInfo_Attribute.ProtoReflect.Descriptor instead.
-func (*NodeInfo_Attribute) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *NodeInfo_Attribute) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-
-func (x *NodeInfo_Attribute) GetValue() string {
- if x != nil {
- return x.Value
- }
- return ""
-}
-
-func (x *NodeInfo_Attribute) GetParents() []string {
- if x != nil {
- return x.Parents
- }
- return nil
-}
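
The removed doc comment above describes the `Price` attribute as a decimal string (comma or point delimiter) that the Network Map stores as a 64-bit unsigned count of minimal token fractions. A minimal sketch of that normalization; `priceToFractions` is a hypothetical helper, and 10^8 fractions per GAS is an assumption here (the real factor is fixed by the FrostFS specification, not by this file):

```go
package main

import (
	"fmt"
	"math/big"
	"strings"
)

// priceToFractions converts a decimal price string (comma or point
// delimiter) into minimal token fractions. decimals=8 is an assumption;
// the actual factor comes from the FrostFS specification.
func priceToFractions(s string, decimals int) (uint64, error) {
	r, ok := new(big.Rat).SetString(strings.ReplaceAll(s, ",", "."))
	if !ok {
		return 0, fmt.Errorf("invalid price %q", s)
	}
	scale := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(decimals)), nil)
	r.Mul(r, new(big.Rat).SetInt(scale))
	if !r.IsInt() {
		return 0, fmt.Errorf("price %q has too many decimal places", s)
	}
	return r.Num().Uint64(), nil
}

func main() {
	v, err := priceToFractions("0,5", 8)
	fmt.Println(v, err) // 50000000 <nil>
}
```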
-
-var File_pkg_services_control_types_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_types_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x36, 0x0a, 0x09, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69,
- 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f,
- 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12,
- 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x3b, 0x0a,
- 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
- 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a,
- 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x05, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x4d, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69,
- 0x62, 0x75, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x47, 0x0a, 0x06, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x27, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22,
- 0x94, 0x02, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a,
- 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a,
- 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74,
- 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72,
- 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x70,
- 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x63, 0x61, 0x63, 0x68, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64,
- 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61, 0x6d, 0x61, 0x5f, 0x70, 0x61,
- 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61,
- 0x6d, 0x61, 0x50, 0x61, 0x74, 0x68, 0x22, 0x36, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74,
- 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x4e,
- 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14,
- 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e,
- 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x01,
- 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x46, 0x46, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a,
- 0x0b, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x2a, 0x57,
- 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b,
- 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f,
- 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53,
- 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41,
- 0x44, 0x59, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47,
- 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x03, 0x2a, 0x6a, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x4d, 0x4f,
- 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e,
- 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d,
- 0x0a, 0x09, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a,
- 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x44,
- 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c,
- 0x59, 0x10, 0x04, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74,
- 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75,
- 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64,
- 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_types_proto_rawDescOnce sync.Once
- file_pkg_services_control_types_proto_rawDescData = file_pkg_services_control_types_proto_rawDesc
-)
-
-func file_pkg_services_control_types_proto_rawDescGZIP() []byte {
- file_pkg_services_control_types_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_types_proto_rawDescData)
- })
- return file_pkg_services_control_types_proto_rawDescData
-}
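
The removed `rawDescGZIP` helper compresses the raw descriptor exactly once and caches the result for all later callers. A standalone sketch of the same `sync.Once` memoization pattern; `compress/gzip` stands in here for whatever `protoimpl.X.CompressGZIP` uses internally, and the payload is a placeholder:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawDescOnce sync.Once
	rawDescData = []byte("raw descriptor bytes") // placeholder payload
)

// rawDescGZIP mirrors the generated pattern: the first caller pays for
// compression, every later caller reuses the cached bytes.
func rawDescGZIP() []byte {
	rawDescOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		_, _ = zw.Write(rawDescData)
		_ = zw.Close()
		rawDescData = buf.Bytes()
	})
	return rawDescData
}

func main() {
	fmt.Println(len(rawDescGZIP()))
}
```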
-
-var file_pkg_services_control_types_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_pkg_services_control_types_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
-var file_pkg_services_control_types_proto_goTypes = []interface{}{
- (NetmapStatus)(0), // 0: control.NetmapStatus
- (HealthStatus)(0), // 1: control.HealthStatus
- (ShardMode)(0), // 2: control.ShardMode
- (*Signature)(nil), // 3: control.Signature
- (*NodeInfo)(nil), // 4: control.NodeInfo
- (*Netmap)(nil), // 5: control.Netmap
- (*ShardInfo)(nil), // 6: control.ShardInfo
- (*BlobstorInfo)(nil), // 7: control.BlobstorInfo
- (*NodeInfo_Attribute)(nil), // 8: control.NodeInfo.Attribute
-}
-var file_pkg_services_control_types_proto_depIdxs = []int32{
- 8, // 0: control.NodeInfo.attributes:type_name -> control.NodeInfo.Attribute
- 0, // 1: control.NodeInfo.state:type_name -> control.NetmapStatus
- 4, // 2: control.Netmap.nodes:type_name -> control.NodeInfo
- 7, // 3: control.ShardInfo.blobstor:type_name -> control.BlobstorInfo
- 2, // 4: control.ShardInfo.mode:type_name -> control.ShardMode
- 5, // [5:5] is the sub-list for method output_type
- 5, // [5:5] is the sub-list for method input_type
- 5, // [5:5] is the sub-list for extension type_name
- 5, // [5:5] is the sub-list for extension extendee
- 0, // [0:5] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_types_proto_init() }
-func file_pkg_services_control_types_proto_init() {
- if File_pkg_services_control_types_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Signature); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Netmap); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BlobstorInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NodeInfo_Attribute); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_types_proto_rawDesc,
- NumEnums: 3,
- NumMessages: 6,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_pkg_services_control_types_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_types_proto_depIdxs,
- EnumInfos: file_pkg_services_control_types_proto_enumTypes,
- MessageInfos: file_pkg_services_control_types_proto_msgTypes,
- }.Build()
- File_pkg_services_control_types_proto = out.File
- file_pkg_services_control_types_proto_rawDesc = nil
- file_pkg_services_control_types_proto_goTypes = nil
- file_pkg_services_control_types_proto_depIdxs = nil
-}
diff --git a/pkg/services/control/types.proto b/pkg/services/control/types.proto
index c85d672c27..d8135ed645 100644
--- a/pkg/services/control/types.proto
+++ b/pkg/services/control/types.proto
@@ -6,166 +6,189 @@ option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/con
// Signature of some message.
message Signature {
- // Public key used for signing.
- bytes key = 1 [json_name = "key"];
+ // Public key used for signing.
+ bytes key = 1 [ json_name = "key" ];
- // Binary signature.
- bytes sign = 2 [json_name = "signature"];
+ // Binary signature.
+ bytes sign = 2 [ json_name = "signature" ];
}
// Status of the storage node in the FrostFS network map.
enum NetmapStatus {
- // Undefined status, default value.
- STATUS_UNDEFINED = 0;
+ // Undefined status, default value.
+ STATUS_UNDEFINED = 0;
- // Node is online.
- ONLINE = 1;
+ // Node is online.
+ ONLINE = 1;
- // Node is offline.
- OFFLINE = 2;
+ // Node is offline.
+ OFFLINE = 2;
- // Node is maintained by the owner.
- MAINTENANCE = 3;
+ // Node is maintained by the owner.
+ MAINTENANCE = 3;
}
// FrostFS node description.
message NodeInfo {
- // Public key of the FrostFS node in a binary format.
- bytes public_key = 1 [json_name = "publicKey"];
+ // Public key of the FrostFS node in a binary format.
+ bytes public_key = 1 [ json_name = "publicKey" ];
- // Ways to connect to a node.
- repeated string addresses = 2 [json_name = "addresses"];
+ // Ways to connect to a node.
+ repeated string addresses = 2 [ json_name = "addresses" ];
- // Administrator-defined Attributes of the FrostFS Storage Node.
- //
- // `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8
- // string. Value can't be empty.
- //
- // Node's attributes are mostly used during Storage Policy evaluation to
- // calculate object's placement and find a set of nodes satisfying policy
- // requirements. There are some "well-known" node attributes common to all the
- // Storage Nodes in the network and used implicitly with default values if not
- // explicitly set:
- //
- // * Capacity \
+ // Administrator-defined Attributes of the FrostFS Storage Node.
+ //
+ // `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8
+ // string. Value can't be empty.
+ //
+ // Node's attributes are mostly used during Storage Policy evaluation to
+ // calculate object's placement and find a set of nodes satisfying policy
+ // requirements. There are some "well-known" node attributes common to all the
+ // Storage Nodes in the network and used implicitly with default values if not
+ // explicitly set:
+ //
+ // * Capacity \
// Total available disk space in Gigabytes.
- // * Price \
+ // * Price \
// Price in GAS tokens for storing one GB of data during one Epoch. In node
- // attributes it's a string presenting floating point number with comma or
- // point delimiter for decimal part. In the Network Map it will be saved as
- // 64-bit unsigned integer representing number of minimal token fractions.
- // * Subnet \
- // String ID of Node's storage subnet. There can be only one subnet served
- // by the Storage Node.
- // * Locode \
+ // attributes it's a string representing a floating-point number with comma
+ // or point delimiter for the decimal part. In the Network Map it will be
+ // saved as a 64-bit unsigned integer representing the number of minimal
+ // token fractions.
+ // * Locode \
// Node's geographic location in
- // [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html)
- // format approximated to the nearest point defined in standard.
- // * Country \
+ // [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html)
+ // format approximated to the nearest point defined in the standard.
+ // * Country \
// Country code in
- // [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
- // format. Calculated automatically from `Locode` attribute
- // * Region \
+ // [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
+ // format. Calculated automatically from `Locode` attribute
+ // * Region \
- // Country's administative subdivision where node is located. Calculated
+ // Country's administrative subdivision where node is located. Calculated
- // automatically from `Locode` attribute based on `SubDiv` field. Presented
- // in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format.
- // * City \
+ // automatically from `Locode` attribute based on `SubDiv` field. Presented
+ // in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format.
+ // * City \
// City, town, village or rural area name where node is located written
- // without diacritics . Calculated automatically from `Locode` attribute.
- //
- // For detailed description of each well-known attribute please see the
- // corresponding section in FrostFS Technical specification.
- message Attribute {
- // Key of the node attribute.
- string key = 1 [json_name = "key"];
+ // without diacritics. Calculated automatically from `Locode` attribute.
+ //
+ // For detailed description of each well-known attribute please see the
+ // corresponding section in FrostFS Technical specification.
+ message Attribute {
+ // Key of the node attribute.
+ string key = 1 [ json_name = "key" ];
- // Value of the node attribute.
- string value = 2 [json_name = "value"];
+ // Value of the node attribute.
+ string value = 2 [ json_name = "value" ];
- // Parent keys, if any. For example for `City` it could be `Region` and
- // `Country`.
- repeated string parents = 3 [json_name = "parents"];
- }
- // Carries list of the FrostFS node attributes in a key-value form. Key name
- // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo
- // structures with duplicated attribute names or attributes with empty values
- // will be considered invalid.
- repeated Attribute attributes = 3 [json_name = "attributes"];
+ // Parent keys, if any. For example for `City` it could be `Region` and
+ // `Country`.
+ repeated string parents = 3 [ json_name = "parents" ];
+ }
+ // Carries list of the FrostFS node attributes in a key-value form. Key name
+ // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo
+ // structures with duplicated attribute names or attributes with empty values
+ // will be considered invalid.
+ repeated Attribute attributes = 3 [ json_name = "attributes" ];
- // Carries state of the FrostFS node.
- NetmapStatus state = 4 [json_name = "state"];
+ // Carries state of the FrostFS node.
+ NetmapStatus state = 4 [ json_name = "state" ];
}
// Network map structure.
message Netmap {
- // Network map revision number.
- uint64 epoch = 1 [json_name = "epoch"];
+ // Network map revision number.
+ uint64 epoch = 1 [ json_name = "epoch" ];
- // Nodes presented in network.
- repeated NodeInfo nodes = 2 [json_name = "nodes"];
+ // Nodes presented in network.
+ repeated NodeInfo nodes = 2 [ json_name = "nodes" ];
}
// Health status of the storage node application.
enum HealthStatus {
- // Undefined status, default value.
- HEALTH_STATUS_UNDEFINED = 0;
+ // Undefined status, default value.
+ HEALTH_STATUS_UNDEFINED = 0;
- // Storage node application is starting.
- STARTING = 1;
+ // Storage node application is starting.
+ STARTING = 1;
- // Storage node application is started and serves all services.
- READY = 2;
+ // Storage node application is started and serves all services.
+ READY = 2;
- // Storage node application is shutting down.
- SHUTTING_DOWN = 3;
+ // Storage node application is shutting down.
+ SHUTTING_DOWN = 3;
+
+ // Storage node application is reconfiguring.
+ RECONFIGURING = 4;
}
// Shard description.
message ShardInfo {
- // ID of the shard.
- bytes shard_ID = 1 [json_name = "shardID"];
+ // ID of the shard.
+ bytes shard_ID = 1 [ json_name = "shardID" ];
- // Path to shard's metabase.
- string metabase_path = 2 [json_name = "metabasePath"];
+ // Path to shard's metabase.
+ string metabase_path = 2 [ json_name = "metabasePath" ];
- // Shard's blobstor info.
- repeated BlobstorInfo blobstor = 3 [json_name = "blobstor"];
+ // Shard's blobstor info.
+ repeated BlobstorInfo blobstor = 3 [ json_name = "blobstor" ];
- // Path to shard's write-cache, empty if disabled.
- string writecache_path = 4 [json_name = "writecachePath"];
+ // Path to shard's write-cache, empty if disabled.
+ string writecache_path = 4 [ json_name = "writecachePath" ];
- // Work mode of the shard.
- ShardMode mode = 5;
+ // Work mode of the shard.
+ ShardMode mode = 5;
- // Amount of errors occured.
- uint32 errorCount = 6;
+ // Number of errors occurred.
+ uint32 errorCount = 6;
- // Path to shard's pilorama storage.
- string pilorama_path = 7 [json_name = "piloramaPath"];
+ // Path to shard's pilorama storage.
+ string pilorama_path = 7 [ json_name = "piloramaPath" ];
+
+ // Evacuation status.
+ bool evacuation_in_progress = 8 [ json_name = "evacuationInProgress" ];
}
// Blobstor component description.
message BlobstorInfo {
- // Path to the root.
- string path = 1 [json_name = "path"];
- // Component type.
- string type = 2 [json_name = "type"];
+ // Path to the root.
+ string path = 1 [ json_name = "path" ];
+ // Component type.
+ string type = 2 [ json_name = "type" ];
}
// Work mode of the shard.
enum ShardMode {
- // Undefined mode, default value.
- SHARD_MODE_UNDEFINED = 0;
+ // Undefined mode, default value.
+ SHARD_MODE_UNDEFINED = 0;
- // Read-write.
- READ_WRITE = 1;
+ // Read-write.
+ READ_WRITE = 1;
- // Read-only.
- READ_ONLY = 2;
+ // Read-only.
+ READ_ONLY = 2;
- // Degraded.
- DEGRADED = 3;
+ // Degraded.
+ DEGRADED = 3;
- // DegradedReadOnly.
- DEGRADED_READ_ONLY = 4;
+ // DegradedReadOnly.
+ DEGRADED_READ_ONLY = 4;
+}
+
+// ChainTarget is an object to which local overrides
+// are applied.
+message ChainTarget {
+ enum TargetType {
+ UNDEFINED = 0;
+
+ NAMESPACE = 1;
+
+ CONTAINER = 2;
+
+ USER = 3;
+
+ GROUP = 4;
+ }
+
+ TargetType type = 1;
+
+ string Name = 2;
}
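
The `parents` field on `Attribute` encodes the hierarchy sketched in the comment earlier in this file (for `City` the parents could be `Region` and `Country`). A short sketch against the regenerated `control` bindings from the next file; the city value is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	attr := control.NodeInfo_Attribute{
		Key:     "City",
		Value:   "Helsinki", // placeholder value
		Parents: []string{"Region", "Country"},
	}
	b, err := json.Marshal(&attr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"key":"City","value":"Helsinki","parents":["Region","Country"]}
}
```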
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 1d05102b3b..69d87292d4 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -2,212 +2,1908 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type NetmapStatus int32
+
+const (
+ NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0
+ NetmapStatus_ONLINE NetmapStatus = 1
+ NetmapStatus_OFFLINE NetmapStatus = 2
+ NetmapStatus_MAINTENANCE NetmapStatus = 3
+)
+
+var (
+ NetmapStatus_name = map[int32]string{
+ 0: "STATUS_UNDEFINED",
+ 1: "ONLINE",
+ 2: "OFFLINE",
+ 3: "MAINTENANCE",
+ }
+ NetmapStatus_value = map[string]int32{
+ "STATUS_UNDEFINED": 0,
+ "ONLINE": 1,
+ "OFFLINE": 2,
+ "MAINTENANCE": 3,
+ }
+)
+
+func (x NetmapStatus) String() string {
+ if v, ok := NetmapStatus_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *NetmapStatus) FromString(s string) bool {
+ if v, ok := NetmapStatus_value[s]; ok {
+ *x = NetmapStatus(v)
+ return true
+ }
+ return false
+}
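
With protoimpl gone, enum names round-trip through plain maps: `String` falls back to the decimal value for unknown codes, and `FromString` reports whether the name was recognized. A quick sketch:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	var st control.NetmapStatus
	if st.FromString("MAINTENANCE") {
		fmt.Println(st, int32(st)) // MAINTENANCE 3
	}
	fmt.Println(control.NetmapStatus(42)) // unknown code prints as "42"
}
```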
+
+type HealthStatus int32
+
+const (
+ HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
+ HealthStatus_STARTING HealthStatus = 1
+ HealthStatus_READY HealthStatus = 2
+ HealthStatus_SHUTTING_DOWN HealthStatus = 3
+ HealthStatus_RECONFIGURING HealthStatus = 4
+)
+
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "HEALTH_STATUS_UNDEFINED",
+ 1: "STARTING",
+ 2: "READY",
+ 3: "SHUTTING_DOWN",
+ 4: "RECONFIGURING",
+ }
+ HealthStatus_value = map[string]int32{
+ "HEALTH_STATUS_UNDEFINED": 0,
+ "STARTING": 1,
+ "READY": 2,
+ "SHUTTING_DOWN": 3,
+ "RECONFIGURING": 4,
+ }
+)
+
+func (x HealthStatus) String() string {
+ if v, ok := HealthStatus_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *HealthStatus) FromString(s string) bool {
+ if v, ok := HealthStatus_value[s]; ok {
+ *x = HealthStatus(v)
+ return true
+ }
+ return false
+}
+
+type ShardMode int32
+
+const (
+ ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0
+ ShardMode_READ_WRITE ShardMode = 1
+ ShardMode_READ_ONLY ShardMode = 2
+ ShardMode_DEGRADED ShardMode = 3
+ ShardMode_DEGRADED_READ_ONLY ShardMode = 4
+)
+
+var (
+ ShardMode_name = map[int32]string{
+ 0: "SHARD_MODE_UNDEFINED",
+ 1: "READ_WRITE",
+ 2: "READ_ONLY",
+ 3: "DEGRADED",
+ 4: "DEGRADED_READ_ONLY",
+ }
+ ShardMode_value = map[string]int32{
+ "SHARD_MODE_UNDEFINED": 0,
+ "READ_WRITE": 1,
+ "READ_ONLY": 2,
+ "DEGRADED": 3,
+ "DEGRADED_READ_ONLY": 4,
+ }
+)
+
+func (x ShardMode) String() string {
+ if v, ok := ShardMode_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *ShardMode) FromString(s string) bool {
+ if v, ok := ShardMode_value[s]; ok {
+ *x = ShardMode(v)
+ return true
+ }
+ return false
+}
+
+type Signature struct {
+ Key []byte `json:"key"`
+ Sign []byte `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Signature)(nil)
+ _ encoding.ProtoUnmarshaler = (*Signature)(nil)
+ _ json.Marshaler = (*Signature)(nil)
+ _ json.Unmarshaler = (*Signature)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *Signature) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.Key)
size += proto.BytesSize(2, x.Sign)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Signature) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Signature) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if len(x.Sign) != 0 {
+ mm.AppendBytes(2, x.Sign)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Signature")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Sign
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Sign")
+ }
+ x.Sign = data
+ }
+ }
+ return nil
+}
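
`MarshalProtobuf` borrows a marshaler from the SDK pool and emits only non-zero fields; `UnmarshalProtobuf` walks the wire format with easyproto and silently skips unknown field numbers. A round-trip sketch with placeholder bytes:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	sig := control.Signature{
		Key:  []byte{0x02, 0xAA}, // placeholder public key
		Sign: []byte{0x30, 0x44}, // placeholder signature
	}
	data := sig.MarshalProtobuf(nil)

	var out control.Signature
	if err := out.UnmarshalProtobuf(data); err != nil {
		panic(err)
	}
	fmt.Printf("key=%x sign=%x\n", out.GetKey(), out.GetSign())
}
```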
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *Signature) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+func (x *Signature) SetSign(v []byte) {
+ x.Sign = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Signature) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ if x.Sign != nil {
+ out.Base64Bytes(x.Sign)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Signature) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Key = f
+ }
+ case "signature":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Sign = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
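
On the JSON side, byte fields are written as base64 strings (and a nil slice as `""`), matching the easyjson writer above. Because `Signature` implements `json.Marshaler` and `json.Unmarshaler`, plain `encoding/json` picks these methods up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	sig := control.Signature{Key: []byte("pub"), Sign: []byte("sig")}
	b, err := json.Marshal(&sig)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"key":"cHVi","signature":"c2ln"}

	var out control.Signature
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%s %s\n", out.Key, out.Sign) // pub sig
}
```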
+
+type NodeInfo_Attribute struct {
+ Key string `json:"key"`
+ Value string `json:"value"`
+ Parents []string `json:"parents"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*NodeInfo_Attribute)(nil)
+ _ encoding.ProtoUnmarshaler = (*NodeInfo_Attribute)(nil)
+ _ json.Marshaler = (*NodeInfo_Attribute)(nil)
+ _ json.Unmarshaler = (*NodeInfo_Attribute)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *NodeInfo_Attribute) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.StringSize(1, x.Key)
size += proto.StringSize(2, x.Value)
size += proto.RepeatedStringSize(3, x.Parents)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *NodeInfo_Attribute) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.StringMarshal(1, buf[offset:], x.Key)
- offset += proto.StringMarshal(2, buf[offset:], x.Value)
- offset += proto.RepeatedStringMarshal(3, buf[offset:], x.Parents)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *NodeInfo_Attribute) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *NodeInfo_Attribute) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendString(1, x.Key)
+ }
+ if len(x.Value) != 0 {
+ mm.AppendString(2, x.Value)
+ }
+ for j := range x.Parents {
+ mm.AppendString(3, x.Parents[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *NodeInfo_Attribute) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "NodeInfo_Attribute")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Value
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Value")
+ }
+ x.Value = data
+ case 3: // Parents
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Parents")
+ }
+ x.Parents = append(x.Parents, data)
+ }
+ }
+ return nil
+}
+func (x *NodeInfo_Attribute) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+func (x *NodeInfo_Attribute) SetKey(v string) {
+ x.Key = v
+}
+func (x *NodeInfo_Attribute) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+func (x *NodeInfo_Attribute) SetValue(v string) {
+ x.Value = v
+}
+func (x *NodeInfo_Attribute) GetParents() []string {
+ if x != nil {
+ return x.Parents
+ }
+ return nil
+}
+func (x *NodeInfo_Attribute) SetParents(v []string) {
+ x.Parents = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *NodeInfo_Attribute) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ out.String(x.Key)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"value\":"
+ out.RawString(prefix)
+ out.String(x.Value)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parents\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Parents {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Parents[i])
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *NodeInfo_Attribute) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *NodeInfo_Attribute) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f string
+ f = in.String()
+ x.Key = f
+ }
+ case "value":
+ {
+ var f string
+ f = in.String()
+ x.Value = f
+ }
+ case "parents":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Parents = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type NodeInfo struct {
+ PublicKey []byte `json:"publicKey"`
+ Addresses []string `json:"addresses"`
+ Attributes []NodeInfo_Attribute `json:"attributes"`
+ State NetmapStatus `json:"state"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*NodeInfo)(nil)
+ _ encoding.ProtoUnmarshaler = (*NodeInfo)(nil)
+ _ json.Marshaler = (*NodeInfo)(nil)
+ _ json.Unmarshaler = (*NodeInfo)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *NodeInfo) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.PublicKey)
size += proto.RepeatedStringSize(2, x.Addresses)
for i := range x.Attributes {
- size += proto.NestedStructureSize(3, x.Attributes[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Attributes[i])
}
size += proto.EnumSize(4, int32(x.State))
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *NodeInfo) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.PublicKey)
- offset += proto.RepeatedStringMarshal(2, buf[offset:], x.Addresses)
- for i := range x.Attributes {
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Attributes[i])
- }
- offset += proto.EnumMarshal(4, buf[offset:], int32(x.State))
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *NodeInfo) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.PublicKey) != 0 {
+ mm.AppendBytes(1, x.PublicKey)
+ }
+ for j := range x.Addresses {
+ mm.AppendString(2, x.Addresses[j])
+ }
+ for i := range x.Attributes {
+ x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
+ }
+ if int32(x.State) != 0 {
+ mm.AppendInt32(4, int32(x.State))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *NodeInfo) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "NodeInfo")
+ }
+ switch fc.FieldNum {
+ case 1: // PublicKey
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PublicKey")
+ }
+ x.PublicKey = data
+ case 2: // Addresses
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Addresses")
+ }
+ x.Addresses = append(x.Addresses, data)
+ case 3: // Attributes
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Attributes")
+ }
+ x.Attributes = append(x.Attributes, NodeInfo_Attribute{})
+ ff := &x.Attributes[len(x.Attributes)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 4: // State
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "State")
+ }
+ x.State = NetmapStatus(data)
+ }
+ }
+ return nil
+}
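
Note the type change: `Attributes` is now a value slice (`[]NodeInfo_Attribute`, sized with `NestedStructureSizeUnchecked`) rather than a slice of pointers, so nested messages marshal in place without per-element allocations. A round-trip sketch with placeholder data:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	ni := control.NodeInfo{
		PublicKey: []byte{0x03, 0x01},               // placeholder key
		Addresses: []string{"/dns4/node1/tcp/8080"}, // placeholder address
		Attributes: []control.NodeInfo_Attribute{ // value slice, no pointers
			{Key: "Capacity", Value: "100"},
		},
		State: control.NetmapStatus_ONLINE,
	}

	var out control.NodeInfo
	if err := out.UnmarshalProtobuf(ni.MarshalProtobuf(nil)); err != nil {
		panic(err)
	}
	fmt.Println(out.GetState(), out.GetAttributes()[0].GetKey()) // ONLINE Capacity
}
```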
+func (x *NodeInfo) GetPublicKey() []byte {
+ if x != nil {
+ return x.PublicKey
+ }
+ return nil
+}
+func (x *NodeInfo) SetPublicKey(v []byte) {
+ x.PublicKey = v
+}
+func (x *NodeInfo) GetAddresses() []string {
+ if x != nil {
+ return x.Addresses
+ }
+ return nil
+}
+func (x *NodeInfo) SetAddresses(v []string) {
+ x.Addresses = v
+}
+func (x *NodeInfo) GetAttributes() []NodeInfo_Attribute {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+func (x *NodeInfo) SetAttributes(v []NodeInfo_Attribute) {
+ x.Attributes = v
+}
+func (x *NodeInfo) GetState() NetmapStatus {
+ if x != nil {
+ return x.State
+ }
+ return 0
+}
+func (x *NodeInfo) SetState(v NetmapStatus) {
+ x.State = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *NodeInfo) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"publicKey\":"
+ out.RawString(prefix)
+ if x.PublicKey != nil {
+ out.Base64Bytes(x.PublicKey)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"addresses\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Addresses {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Addresses[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"attributes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Attributes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Attributes[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"state\":"
+ out.RawString(prefix)
+ v := int32(x.State)
+ if vv, ok := NetmapStatus_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *NodeInfo) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "publicKey":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.PublicKey = f
+ }
+ case "addresses":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Addresses = list
+ in.Delim(']')
+ }
+ case "attributes":
+ {
+ var f NodeInfo_Attribute
+ var list []NodeInfo_Attribute
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = NodeInfo_Attribute{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Attributes = list
+ in.Delim(']')
+ }
+ case "state":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.State = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
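
The `state` decoder accepts either the symbolic enum name or a numeric code (digit strings are parsed too), mirroring how protojson treats enums. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	for _, payload := range []string{
		`{"state":"ONLINE"}`, // symbolic enum name
		`{"state":3}`,        // raw numeric code
	} {
		var ni control.NodeInfo
		if err := json.Unmarshal([]byte(payload), &ni); err != nil {
			panic(err)
		}
		fmt.Println(ni.State) // ONLINE, then MAINTENANCE
	}
}
```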
+
+type Netmap struct {
+ Epoch uint64 `json:"epoch"`
+ Nodes []NodeInfo `json:"nodes"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Netmap)(nil)
+ _ encoding.ProtoUnmarshaler = (*Netmap)(nil)
+ _ json.Marshaler = (*Netmap)(nil)
+ _ json.Unmarshaler = (*Netmap)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *Netmap) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.UInt64Size(1, x.Epoch)
for i := range x.Nodes {
- size += proto.NestedStructureSize(2, x.Nodes[i])
+ size += proto.NestedStructureSizeUnchecked(2, &x.Nodes[i])
}
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Netmap) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.Epoch)
- for i := range x.Nodes {
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Nodes[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Netmap) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Epoch != 0 {
+ mm.AppendUint64(1, x.Epoch)
+ }
+ for i := range x.Nodes {
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Netmap) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Netmap")
+ }
+ switch fc.FieldNum {
+ case 1: // Epoch
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Epoch")
+ }
+ x.Epoch = data
+ case 2: // Nodes
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Nodes")
+ }
+ x.Nodes = append(x.Nodes, NodeInfo{})
+ ff := &x.Nodes[len(x.Nodes)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *Netmap) GetEpoch() uint64 {
+ if x != nil {
+ return x.Epoch
+ }
+ return 0
+}
+func (x *Netmap) SetEpoch(v uint64) {
+ x.Epoch = v
+}
+func (x *Netmap) GetNodes() []NodeInfo {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+func (x *Netmap) SetNodes(v []NodeInfo) {
+ x.Nodes = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Netmap) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"epoch\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Nodes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Nodes[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Netmap) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "epoch":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.Epoch = f
+ }
+ case "nodes":
+ {
+ var f NodeInfo
+ var list []NodeInfo
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = NodeInfo{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Nodes = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
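
`Epoch` is a `uint64`, so the writer quotes it (`"epoch":"42"`) the way protojson renders 64-bit integers, while the reader goes through jlexer's `JsonNumber` and accepts both quoted and bare numbers. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	nm := control.Netmap{Epoch: 42}
	b, err := json.Marshal(&nm)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"epoch":"42","nodes":[]}

	var out control.Netmap
	if err := json.Unmarshal([]byte(`{"epoch":7,"nodes":[]}`), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetEpoch()) // 7
}
```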
+
+type ShardInfo struct {
+ Shard_ID []byte `json:"shardID"`
+ MetabasePath string `json:"metabasePath"`
+ Blobstor []BlobstorInfo `json:"blobstor"`
+ WritecachePath string `json:"writecachePath"`
+ Mode ShardMode `json:"mode"`
+ ErrorCount uint32 `json:"errorCount"`
+ PiloramaPath string `json:"piloramaPath"`
+ EvacuationInProgress bool `json:"evacuationInProgress"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ShardInfo)(nil)
+ _ encoding.ProtoUnmarshaler = (*ShardInfo)(nil)
+ _ json.Marshaler = (*ShardInfo)(nil)
+ _ json.Unmarshaler = (*ShardInfo)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ShardInfo) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.Shard_ID)
size += proto.StringSize(2, x.MetabasePath)
for i := range x.Blobstor {
- size += proto.NestedStructureSize(3, x.Blobstor[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Blobstor[i])
}
size += proto.StringSize(4, x.WritecachePath)
size += proto.EnumSize(5, int32(x.Mode))
size += proto.UInt32Size(6, x.ErrorCount)
size += proto.StringSize(7, x.PiloramaPath)
+ size += proto.BoolSize(8, x.EvacuationInProgress)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ShardInfo) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.StringMarshal(2, buf[offset:], x.MetabasePath)
- for i := range x.Blobstor {
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Blobstor[i])
- }
- offset += proto.StringMarshal(4, buf[offset:], x.WritecachePath)
- offset += proto.EnumMarshal(5, buf[offset:], int32(x.Mode))
- offset += proto.UInt32Marshal(6, buf[offset:], x.ErrorCount)
- offset += proto.StringMarshal(7, buf[offset:], x.PiloramaPath)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ShardInfo) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Shard_ID) != 0 {
+ mm.AppendBytes(1, x.Shard_ID)
+ }
+ if len(x.MetabasePath) != 0 {
+ mm.AppendString(2, x.MetabasePath)
+ }
+ for i := range x.Blobstor {
+ x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
+ }
+ if len(x.WritecachePath) != 0 {
+ mm.AppendString(4, x.WritecachePath)
+ }
+ if int32(x.Mode) != 0 {
+ mm.AppendInt32(5, int32(x.Mode))
+ }
+ if x.ErrorCount != 0 {
+ mm.AppendUint32(6, x.ErrorCount)
+ }
+ if len(x.PiloramaPath) != 0 {
+ mm.AppendString(7, x.PiloramaPath)
+ }
+ if x.EvacuationInProgress {
+ mm.AppendBool(8, x.EvacuationInProgress)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ShardInfo")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = data
+ case 2: // MetabasePath
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "MetabasePath")
+ }
+ x.MetabasePath = data
+ case 3: // Blobstor
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Blobstor")
+ }
+ x.Blobstor = append(x.Blobstor, BlobstorInfo{})
+ ff := &x.Blobstor[len(x.Blobstor)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 4: // WritecachePath
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "WritecachePath")
+ }
+ x.WritecachePath = data
+ case 5: // Mode
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Mode")
+ }
+ x.Mode = ShardMode(data)
+ case 6: // ErrorCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ErrorCount")
+ }
+ x.ErrorCount = data
+ case 7: // PiloramaPath
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PiloramaPath")
+ }
+ x.PiloramaPath = data
+ case 8: // EvacuationInProgress
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "EvacuationInProgress")
+ }
+ x.EvacuationInProgress = data
+ }
+ }
+ return nil
+}
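+
+// Hypothetical decode sketch; note that the repeated Blobstor field is
+// appended to, so reuse a zero-value ShardInfo when unmarshaling in a loop:
+//
+//	var si ShardInfo
+//	if err := si.UnmarshalProtobuf(wire); err != nil {
+//		// handle malformed input
+//	}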
+func (x *ShardInfo) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *ShardInfo) SetShard_ID(v []byte) {
+ x.Shard_ID = v
+}
+func (x *ShardInfo) GetMetabasePath() string {
+ if x != nil {
+ return x.MetabasePath
+ }
+ return ""
+}
+func (x *ShardInfo) SetMetabasePath(v string) {
+ x.MetabasePath = v
+}
+func (x *ShardInfo) GetBlobstor() []BlobstorInfo {
+ if x != nil {
+ return x.Blobstor
+ }
+ return nil
+}
+func (x *ShardInfo) SetBlobstor(v []BlobstorInfo) {
+ x.Blobstor = v
+}
+func (x *ShardInfo) GetWritecachePath() string {
+ if x != nil {
+ return x.WritecachePath
+ }
+ return ""
+}
+func (x *ShardInfo) SetWritecachePath(v string) {
+ x.WritecachePath = v
+}
+func (x *ShardInfo) GetMode() ShardMode {
+ if x != nil {
+ return x.Mode
+ }
+ return 0
+}
+func (x *ShardInfo) SetMode(v ShardMode) {
+ x.Mode = v
+}
+func (x *ShardInfo) GetErrorCount() uint32 {
+ if x != nil {
+ return x.ErrorCount
+ }
+ return 0
+}
+func (x *ShardInfo) SetErrorCount(v uint32) {
+ x.ErrorCount = v
+}
+func (x *ShardInfo) GetPiloramaPath() string {
+ if x != nil {
+ return x.PiloramaPath
+ }
+ return ""
+}
+func (x *ShardInfo) SetPiloramaPath(v string) {
+ x.PiloramaPath = v
+}
+func (x *ShardInfo) GetEvacuationInProgress() bool {
+ if x != nil {
+ return x.EvacuationInProgress
+ }
+ return false
+}
+func (x *ShardInfo) SetEvacuationInProgress(v bool) {
+ x.EvacuationInProgress = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ShardInfo) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"shardID\":"
+ out.RawString(prefix)
+ if x.Shard_ID != nil {
+ out.Base64Bytes(x.Shard_ID)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"metabasePath\":"
+ out.RawString(prefix)
+ out.String(x.MetabasePath)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"blobstor\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Blobstor {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Blobstor[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"writecachePath\":"
+ out.RawString(prefix)
+ out.String(x.WritecachePath)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"mode\":"
+ out.RawString(prefix)
+ v := int32(x.Mode)
+ if vv, ok := ShardMode_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"errorCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ErrorCount)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"piloramaPath\":"
+ out.RawString(prefix)
+ out.String(x.PiloramaPath)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"evacuationInProgress\":"
+ out.RawString(prefix)
+ out.Bool(x.EvacuationInProgress)
+ }
+ out.RawByte('}')
+}
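+
+// Unlike EmitProtobuf, which omits zero values, MarshalEasyJSON always writes
+// all eight keys (shardID is base64-encoded, with "" standing in for a nil
+// slice), so the JSON shape is stable across inputs.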
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ShardInfo) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Shard_ID = f
+ }
+ case "metabasePath":
+ {
+ var f string
+ f = in.String()
+ x.MetabasePath = f
+ }
+ case "blobstor":
+ {
+ var f BlobstorInfo
+ var list []BlobstorInfo
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = BlobstorInfo{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Blobstor = list
+ in.Delim(']')
+ }
+ case "writecachePath":
+ {
+ var f string
+ f = in.String()
+ x.WritecachePath = f
+ }
+ case "mode":
+ {
+ var f ShardMode
+ var parsedValue ShardMode
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := ShardMode_value[v]; ok {
+ parsedValue = ShardMode(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = ShardMode(vv)
+ case float64:
+ parsedValue = ShardMode(v)
+ }
+ f = parsedValue
+ x.Mode = f
+ }
+ case "errorCount":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.ErrorCount = f
+ }
+ case "piloramaPath":
+ {
+ var f string
+ f = in.String()
+ x.PiloramaPath = f
+ }
+ case "evacuationInProgress":
+ {
+ var f bool
+ f = in.Bool()
+ x.EvacuationInProgress = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
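+
+// The "mode" case above accepts either a symbolic name from ShardMode_name or
+// a raw number, mirroring protojson's lenient enum decoding.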
+
+type BlobstorInfo struct {
+ Path string `json:"path"`
+ Type string `json:"type"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*BlobstorInfo)(nil)
+ _ encoding.ProtoUnmarshaler = (*BlobstorInfo)(nil)
+ _ json.Marshaler = (*BlobstorInfo)(nil)
+ _ json.Unmarshaler = (*BlobstorInfo)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *BlobstorInfo) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.StringSize(1, x.Path)
size += proto.StringSize(2, x.Type)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *BlobstorInfo) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.StringMarshal(1, buf[offset:], x.Path)
- offset += proto.StringMarshal(2, buf[offset:], x.Type)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *BlobstorInfo) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *BlobstorInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Path) != 0 {
+ mm.AppendString(1, x.Path)
+ }
+ if len(x.Type) != 0 {
+ mm.AppendString(2, x.Type)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *BlobstorInfo) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "BlobstorInfo")
+ }
+ switch fc.FieldNum {
+ case 1: // Path
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Path")
+ }
+ x.Path = data
+ case 2: // Type
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Type")
+ }
+ x.Type = data
+ }
+ }
+ return nil
+}
+func (x *BlobstorInfo) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+func (x *BlobstorInfo) SetPath(v string) {
+ x.Path = v
+}
+func (x *BlobstorInfo) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+func (x *BlobstorInfo) SetType(v string) {
+ x.Type = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *BlobstorInfo) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"path\":"
+ out.RawString(prefix)
+ out.String(x.Path)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"type\":"
+ out.RawString(prefix)
+ out.String(x.Type)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *BlobstorInfo) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *BlobstorInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "path":
+ {
+ var f string
+ f = in.String()
+ x.Path = f
+ }
+ case "type":
+ {
+ var f string
+ f = in.String()
+ x.Type = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ChainTarget_TargetType int32
+
+const (
+ ChainTarget_UNDEFINED ChainTarget_TargetType = 0
+ ChainTarget_NAMESPACE ChainTarget_TargetType = 1
+ ChainTarget_CONTAINER ChainTarget_TargetType = 2
+ ChainTarget_USER ChainTarget_TargetType = 3
+ ChainTarget_GROUP ChainTarget_TargetType = 4
+)
+
+var (
+ ChainTarget_TargetType_name = map[int32]string{
+ 0: "UNDEFINED",
+ 1: "NAMESPACE",
+ 2: "CONTAINER",
+ 3: "USER",
+ 4: "GROUP",
+ }
+ ChainTarget_TargetType_value = map[string]int32{
+ "UNDEFINED": 0,
+ "NAMESPACE": 1,
+ "CONTAINER": 2,
+ "USER": 3,
+ "GROUP": 4,
+ }
+)
+
+func (x ChainTarget_TargetType) String() string {
+ if v, ok := ChainTarget_TargetType_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *ChainTarget_TargetType) FromString(s string) bool {
+ if v, ok := ChainTarget_TargetType_value[s]; ok {
+ *x = ChainTarget_TargetType(v)
+ return true
+ }
+ return false
+}
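+
+// Hypothetical usage: FromString reports whether s names a known target type,
+// while String falls back to the decimal value for unknown enum numbers.
+//
+//	var t ChainTarget_TargetType
+//	_ = t.FromString("CONTAINER")           // t == ChainTarget_CONTAINER
+//	_ = ChainTarget_TargetType(42).String() // "42"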
+
+type ChainTarget struct {
+ Type ChainTarget_TargetType `json:"type"`
+ Name string `json:"Name"`
+}
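+
+// Note the asymmetric JSON keys: "type" is lowercase while "Name" keeps the
+// capitalized spelling of the proto field, as reflected in the struct tags.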
+
+var (
+ _ encoding.ProtoMarshaler = (*ChainTarget)(nil)
+ _ encoding.ProtoUnmarshaler = (*ChainTarget)(nil)
+ _ json.Marshaler = (*ChainTarget)(nil)
+ _ json.Unmarshaler = (*ChainTarget)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ChainTarget) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.EnumSize(1, int32(x.Type))
+ size += proto.StringSize(2, x.Name)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ChainTarget) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ChainTarget) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Type) != 0 {
+ mm.AppendInt32(1, int32(x.Type))
+ }
+ if len(x.Name) != 0 {
+ mm.AppendString(2, x.Name)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ChainTarget) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ChainTarget")
+ }
+ switch fc.FieldNum {
+ case 1: // Type
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Type")
+ }
+ x.Type = ChainTarget_TargetType(data)
+ case 2: // Name
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Name")
+ }
+ x.Name = data
+ }
+ }
+ return nil
+}
+func (x *ChainTarget) GetType() ChainTarget_TargetType {
+ if x != nil {
+ return x.Type
+ }
+ return 0
+}
+func (x *ChainTarget) SetType(v ChainTarget_TargetType) {
+ x.Type = v
+}
+func (x *ChainTarget) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+func (x *ChainTarget) SetName(v string) {
+ x.Name = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ChainTarget) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"type\":"
+ out.RawString(prefix)
+ v := int32(x.Type)
+ if vv, ok := ChainTarget_TargetType_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"Name\":"
+ out.RawString(prefix)
+ out.String(x.Name)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ChainTarget) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ChainTarget) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "type":
+ {
+ var f ChainTarget_TargetType
+ var parsedValue ChainTarget_TargetType
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := ChainTarget_TargetType_value[v]; ok {
+ parsedValue = ChainTarget_TargetType(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = ChainTarget_TargetType(vv)
+ case float64:
+ parsedValue = ChainTarget_TargetType(v)
+ }
+ f = parsedValue
+ x.Type = f
+ }
+ case "Name":
+ {
+ var f string
+ f = in.String()
+ x.Name = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/types_test.go b/pkg/services/control/types_test.go
deleted file mode 100644
index 1505a985c4..0000000000
--- a/pkg/services/control/types_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package control_test
-
-import (
- "bytes"
- "path/filepath"
- "strconv"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "github.com/google/uuid"
-)
-
-func TestNetmap_StableMarshal(t *testing.T) {
- testStableMarshal(t, generateNetmap(), new(control.Netmap), func(m1, m2 protoMessage) bool {
- return equalNetmaps(m1.(*control.Netmap), m2.(*control.Netmap))
- })
-}
-
-func generateNetmap() *control.Netmap {
- nm := new(control.Netmap)
- nm.SetEpoch(13)
-
- const nodeCount = 2
-
- nodes := make([]*control.NodeInfo, 0, nodeCount)
-
- for i := 0; i < nodeCount; i++ {
- n := new(control.NodeInfo)
- n.SetPublicKey(testData(33))
- n.SetAddresses([]string{testString(), testString()})
- n.SetState(control.NetmapStatus_ONLINE)
-
- const attrCount = 2
-
- attrs := make([]*control.NodeInfo_Attribute, 0, attrCount)
-
- for j := 0; j < attrCount; j++ {
- a := new(control.NodeInfo_Attribute)
- a.SetKey(testString())
- a.SetValue(testString())
-
- const parentsCount = 2
-
- parents := make([]string, 0, parentsCount)
-
- for k := 0; k < parentsCount; k++ {
- parents = append(parents, testString())
- }
-
- a.SetParents(parents)
-
- attrs = append(attrs, a)
- }
-
- n.SetAttributes(attrs)
-
- nodes = append(nodes, n)
- }
-
- nm.SetNodes(nodes)
-
- return nm
-}
-
-func equalNetmaps(nm1, nm2 *control.Netmap) bool {
- if nm1.GetEpoch() != nm2.GetEpoch() {
- return false
- }
-
- n1, n2 := nm1.GetNodes(), nm2.GetNodes()
-
- if len(n1) != len(n2) {
- return false
- }
-
- for i := range n1 {
- if !equalNodeInfos(n1[i], n2[i]) {
- return false
- }
- }
-
- return true
-}
-
-func equalNodeInfos(n1, n2 *control.NodeInfo) bool {
- if !bytes.Equal(n1.GetPublicKey(), n2.GetPublicKey()) ||
- n1.GetState() != n2.GetState() {
- return false
- }
-
- na1, na2 := n1.GetAddresses(), n2.GetAddresses()
-
- if len(na1) != len(na2) {
- return false
- }
-
- for i := range na1 {
- if na1[i] != na2[i] {
- return false
- }
- }
-
- a1, a2 := n1.GetAttributes(), n2.GetAttributes()
-
- if len(a1) != len(a2) {
- return false
- }
-
- for i := range a1 {
- if a1[i].GetKey() != a2[i].GetKey() || a1[i].GetValue() != a2[i].GetValue() {
- return false
- }
-
- p1, p2 := a1[i].GetParents(), a2[i].GetParents()
-
- if len(p1) != len(p2) {
- return false
- }
-
- for j := range p1 {
- if p1[j] != p2[j] {
- return false
- }
- }
- }
-
- return true
-}
-
-func generateShardInfo(id int) *control.ShardInfo {
- si := new(control.ShardInfo)
-
- path := "/nice/dir/awesome/files/" + strconv.Itoa(id)
-
- uid, _ := uuid.NewRandom()
- bin, _ := uid.MarshalBinary()
-
- si.SetID(bin)
- si.SetMode(control.ShardMode_READ_WRITE)
- si.SetMetabasePath(filepath.Join(path, "meta"))
- si.Blobstor = []*control.BlobstorInfo{
- {Type: fstree.Type, Path: filepath.Join(path, "fstree")},
- {Type: blobovniczatree.Type, Path: filepath.Join(path, "blobtree")}}
- si.SetWriteCachePath(filepath.Join(path, "writecache"))
- si.SetPiloramaPath(filepath.Join(path, "pilorama"))
-
- return si
-}
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 9fa3d767f6..1b92fdaade 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -5,9 +5,11 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
versionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
@@ -18,14 +20,16 @@ type executorSvc struct {
state NodeState
netInfo NetworkInfo
+
+ respSvc *response.Service
}
// NodeState encapsulates information
// about current node state.
type NodeState interface {
- // Must return current node state
+ // LocalNodeInfo must return current node state
// in FrostFS API v2 NodeInfo structure.
- LocalNodeInfo() (*netmap.NodeInfo, error)
+ LocalNodeInfo() *netmapSDK.NodeInfo
// ReadCurrentNetMap reads current local network map of the storage node
// into the given parameter. Returns any error encountered which prevented
@@ -36,21 +40,24 @@ type NodeState interface {
// NetworkInfo encapsulates source of the
// recent information about the FrostFS network.
type NetworkInfo interface {
- // Must return recent network information in FrostFS API v2 NetworkInfo structure.
+ // Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
- Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+ Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
}
-func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo) Server {
- if s == nil || netInfo == nil || !version.IsValid(v) {
- // this should never happen, otherwise it programmers bug
- panic("can't create netmap execution service")
- }
+func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
+ // This should never happen; a failure here indicates a programmer's bug.
+ msg := "BUG: can't create netmap execution service"
+ assert.False(s == nil, msg, "node state is nil")
+ assert.False(netInfo == nil, msg, "network info is nil")
+ assert.False(respSvc == nil, msg, "response service is nil")
+ assert.True(version.IsValid(v), msg, "invalid version")
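+ // Assumed internal/assert semantics: False panics when the condition holds,
+ // True panics when it does not; msg and the detail string are joined into
+ // the panic message, keeping the old fail-fast behavior with more context.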
res := &executorSvc{
state: s,
netInfo: netInfo,
+ respSvc: respSvc,
}
v.WriteToV2(&res.version)
@@ -60,48 +67,27 @@ func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo)
func (s *executorSvc) LocalNodeInfo(
_ context.Context,
- req *netmap.LocalNodeInfoRequest) (*netmap.LocalNodeInfoResponse, error) {
- verV2 := req.GetMetaHeader().GetVersion()
- if verV2 == nil {
- return nil, errors.New("missing version")
- }
-
- var ver versionsdk.Version
- if err := ver.ReadFromV2(*verV2); err != nil {
- return nil, fmt.Errorf("can't read version: %w", err)
- }
-
- ni, err := s.state.LocalNodeInfo()
- if err != nil {
- return nil, err
- }
-
- if addrNum := ni.NumberOfAddresses(); addrNum > 0 && ver.Minor() <= 7 {
- ni2 := new(netmap.NodeInfo)
- ni2.SetPublicKey(ni.GetPublicKey())
- ni2.SetState(ni.GetState())
- ni2.SetAttributes(ni.GetAttributes())
- ni.IterateAddresses(func(s string) bool {
- ni2.SetAddresses(s)
- return true
- })
-
- ni = ni2
- }
+ _ *netmap.LocalNodeInfoRequest,
+) (*netmap.LocalNodeInfoResponse, error) {
+ ni := s.state.LocalNodeInfo()
+ var nodeInfo netmap.NodeInfo
+ ni.WriteToV2(&nodeInfo)
body := new(netmap.LocalNodeInfoResponseBody)
body.SetVersion(&s.version)
- body.SetNodeInfo(ni)
+ body.SetNodeInfo(&nodeInfo)
resp := new(netmap.LocalNodeInfoResponse)
resp.SetBody(body)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
func (s *executorSvc) NetworkInfo(
- _ context.Context,
- req *netmap.NetworkInfoRequest) (*netmap.NetworkInfoResponse, error) {
+ ctx context.Context,
+ req *netmap.NetworkInfoRequest,
+) (*netmap.NetworkInfoResponse, error) {
verV2 := req.GetMetaHeader().GetVersion()
if verV2 == nil {
return nil, errors.New("missing protocol version in meta header")
@@ -112,7 +98,7 @@ func (s *executorSvc) NetworkInfo(
return nil, fmt.Errorf("can't read version: %w", err)
}
- ni, err := s.netInfo.Dump(ver)
+ ni, err := s.netInfo.Dump(ctx, ver)
if err != nil {
return nil, err
}
@@ -126,10 +112,11 @@ func (s *executorSvc) NetworkInfo(
resp := new(netmap.NetworkInfoResponse)
resp.SetBody(body)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
-func (s *executorSvc) Snapshot(_ context.Context, req *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) {
+func (s *executorSvc) Snapshot(_ context.Context, _ *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) {
var nm netmap.NetMap
err := s.state.ReadCurrentNetMap(&nm)
@@ -143,5 +130,6 @@ func (s *executorSvc) Snapshot(_ context.Context, req *netmap.SnapshotRequest) (
resp := new(netmap.SnapshotResponse)
resp.SetBody(body)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
diff --git a/pkg/services/netmap/response.go b/pkg/services/netmap/response.go
deleted file mode 100644
index 8b035e461b..0000000000
--- a/pkg/services/netmap/response.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package netmap
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
-)
-
-type responseService struct {
- respSvc *response.Service
-
- svc Server
-}
-
-// NewResponseService returns netmap service instance that passes internal service
-// call to response service.
-func NewResponseService(nmSvc Server, respSvc *response.Service) Server {
- return &responseService{
- respSvc: respSvc,
- svc: nmSvc,
- }
-}
-
-func (s *responseService) LocalNodeInfo(ctx context.Context, req *netmap.LocalNodeInfoRequest) (*netmap.LocalNodeInfoResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.LocalNodeInfo(ctx, req.(*netmap.LocalNodeInfoRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*netmap.LocalNodeInfoResponse), nil
-}
-
-func (s *responseService) NetworkInfo(ctx context.Context, req *netmap.NetworkInfoRequest) (*netmap.NetworkInfoResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.NetworkInfo(ctx, req.(*netmap.NetworkInfoRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*netmap.NetworkInfoResponse), nil
-}
-
-func (s *responseService) Snapshot(ctx context.Context, req *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Snapshot(ctx, req.(*netmap.SnapshotRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*netmap.SnapshotResponse), nil
-}
diff --git a/pkg/services/netmap/server.go b/pkg/services/netmap/server.go
index 0a09c9f44f..eff880dbee 100644
--- a/pkg/services/netmap/server.go
+++ b/pkg/services/netmap/server.go
@@ -3,7 +3,7 @@ package netmap
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
)
// Server is an interface of the FrostFS API Netmap service server.
diff --git a/pkg/services/netmap/sign.go b/pkg/services/netmap/sign.go
index 85b19d8622..5f184d5c0f 100644
--- a/pkg/services/netmap/sign.go
+++ b/pkg/services/netmap/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
)
type signService struct {
@@ -23,50 +23,30 @@ func NewSignService(key *ecdsa.PrivateKey, svc Server) Server {
func (s *signService) LocalNodeInfo(
ctx context.Context,
- req *netmap.LocalNodeInfoRequest) (*netmap.LocalNodeInfoResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.LocalNodeInfo(ctx, req.(*netmap.LocalNodeInfoRequest))
- },
- func() util.ResponseMessage {
- return new(netmap.LocalNodeInfoResponse)
- },
- )
- if err != nil {
- return nil, err
+ req *netmap.LocalNodeInfoRequest,
+) (*netmap.LocalNodeInfoResponse, error) {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(netmap.LocalNodeInfoResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*netmap.LocalNodeInfoResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.LocalNodeInfo(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
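+ // Even when VerifyRequest fails, a non-nil response is returned and passed
+ // through SignResponse, which is assumed to fold the error into the response
+ // status so clients always receive a signed, structured reply.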
func (s *signService) NetworkInfo(ctx context.Context, req *netmap.NetworkInfoRequest) (*netmap.NetworkInfoResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.NetworkInfo(ctx, req.(*netmap.NetworkInfoRequest))
- },
- func() util.ResponseMessage {
- return new(netmap.NetworkInfoResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(netmap.NetworkInfoResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*netmap.NetworkInfoResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.NetworkInfo(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
func (s *signService) Snapshot(ctx context.Context, req *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Snapshot(ctx, req.(*netmap.SnapshotRequest))
- },
- func() util.ResponseMessage {
- return new(netmap.SnapshotResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(netmap.SnapshotResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*netmap.SnapshotResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.Snapshot(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
diff --git a/pkg/services/notificator/deps.go b/pkg/services/notificator/deps.go
deleted file mode 100644
index ded4b4b7da..0000000000
--- a/pkg/services/notificator/deps.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package notificator
-
-import (
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// NotificationSource is a source of object notifications.
-type NotificationSource interface {
- // Iterate must iterate over all notifications for the
- // provided epoch and call handler for all of them.
- Iterate(epoch uint64, handler func(topic string, addr oid.Address))
-}
-
-// NotificationWriter notifies all the subscribers
-// about new object notifications.
-type NotificationWriter interface {
- // Notify must notify about an event generated
- // from an object with a specific topic.
- Notify(topic string, address oid.Address)
-}
diff --git a/pkg/services/notificator/nats/options.go b/pkg/services/notificator/nats/options.go
deleted file mode 100644
index c9ba2ed260..0000000000
--- a/pkg/services/notificator/nats/options.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package nats
-
-import (
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/nats-io/nats.go"
-)
-
-func WithClientCert(certPath, keyPath string) Option {
- return func(o *opts) {
- o.nOpts = append(o.nOpts, nats.ClientCert(certPath, keyPath))
- }
-}
-
-func WithRootCA(paths ...string) Option {
- return func(o *opts) {
- o.nOpts = append(o.nOpts, nats.RootCAs(paths...))
- }
-}
-
-func WithTimeout(timeout time.Duration) Option {
- return func(o *opts) {
- o.nOpts = append(o.nOpts, nats.Timeout(timeout))
- }
-}
-
-func WithConnectionName(name string) Option {
- return func(o *opts) {
- o.nOpts = append(o.nOpts, nats.Name(name))
- }
-}
-
-func WithLogger(logger *logger.Logger) Option {
- return func(o *opts) {
- o.log = logger
- }
-}
diff --git a/pkg/services/notificator/nats/service.go b/pkg/services/notificator/nats/service.go
deleted file mode 100644
index 54eb373ec5..0000000000
--- a/pkg/services/notificator/nats/service.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package nats
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nats-io/nats.go"
- "go.uber.org/zap"
-)
-
-// Writer is a NATS object notification writer.
-// It handles NATS JetStream connections and allows
-// sending string representation of the address to
-// the NATS server.
-//
-// For correct operation must be created via New function.
-// new(Writer) or Writer{} construction leads to undefined
-// behaviour and is not safe.
-type Writer struct {
- js nats.JetStreamContext
- nc *nats.Conn
-
- m *sync.RWMutex
- createdStreams map[string]struct{}
- opts
-}
-
-type opts struct {
- log *logger.Logger
- nOpts []nats.Option
-}
-
-type Option func(*opts)
-
-var errConnIsClosed = errors.New("connection to the server is closed")
-
-// Notify sends object address's string representation to the provided topic.
-// Uses first 4 bytes of object ID as a message ID to support 'exactly once'
-// message delivery.
-//
-// Returns error only if:
-// 1. underlying connection was closed and has not been established again;
-// 2. NATS server could not respond that it has saved the message.
-func (n *Writer) Notify(topic string, address oid.Address) error {
- if !n.nc.IsConnected() {
- return errConnIsClosed
- }
-
- // use first 4 byte of the encoded string as
- // message ID for the 'exactly once' delivery
- messageID := address.Object().EncodeToString()[:4]
-
- // check if the stream was previously created
- n.m.RLock()
- _, created := n.createdStreams[topic]
- n.m.RUnlock()
-
- if !created {
- _, err := n.js.AddStream(&nats.StreamConfig{
- Name: topic,
- })
- if err != nil {
- return fmt.Errorf("could not add stream: %w", err)
- }
-
- n.m.Lock()
- n.createdStreams[topic] = struct{}{}
- n.m.Unlock()
- }
-
- _, err := n.js.Publish(topic, []byte(address.EncodeToString()), nats.MsgId(messageID))
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// New creates new Writer.
-func New(oo ...Option) *Writer {
- w := &Writer{
- m: &sync.RWMutex{},
- createdStreams: make(map[string]struct{}),
- opts: opts{
- log: &logger.Logger{Logger: zap.L()},
- nOpts: make([]nats.Option, 0, len(oo)+3),
- },
- }
-
- for _, o := range oo {
- o(&w.opts)
- }
-
- w.opts.nOpts = append(w.opts.nOpts,
- nats.NoCallbacksAfterClientClose(), // do not call callbacks when it was planned writer stop
- nats.DisconnectErrHandler(func(conn *nats.Conn, err error) {
- w.log.Error("nats: connection was lost", zap.Error(err))
- }),
- nats.ReconnectHandler(func(conn *nats.Conn) {
- w.log.Warn("nats: reconnected to the server")
- }),
- )
-
- return w
-}
-
-// Connect tries to connect to a specified NATS endpoint.
-//
-// Connection is closed when passed context is done.
-func (n *Writer) Connect(ctx context.Context, endpoint string) error {
- nc, err := nats.Connect(endpoint, n.opts.nOpts...)
- if err != nil {
- return fmt.Errorf("could not connect to server: %w", err)
- }
-
- n.nc = nc
-
- // usage w/o options is error-free
- n.js, _ = nc.JetStream()
-
- go func() {
- <-ctx.Done()
- n.opts.log.Info("nats: closing connection as the context is done")
-
- nc.Close()
- }()
-
- return nil
-}
diff --git a/pkg/services/notificator/service.go b/pkg/services/notificator/service.go
deleted file mode 100644
index 0966183000..0000000000
--- a/pkg/services/notificator/service.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package notificator
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-// Prm groups Notificator constructor's
-// parameters. All are required.
-type Prm struct {
- writer NotificationWriter
- notificationSource NotificationSource
- logger *logger.Logger
-}
-
-// SetLogger sets a logger.
-func (prm *Prm) SetLogger(v *logger.Logger) *Prm {
- prm.logger = v
- return prm
-}
-
-// SetWriter sets notification writer.
-func (prm *Prm) SetWriter(v NotificationWriter) *Prm {
- prm.writer = v
- return prm
-}
-
-// SetNotificationSource sets notification source.
-func (prm *Prm) SetNotificationSource(v NotificationSource) *Prm {
- prm.notificationSource = v
- return prm
-}
-
-// Notificator is a notification producer that handles
-// objects with defined notification epoch.
-//
-// Working client must be created via constructor New.
-// Using the Client that has been created with new(Client)
-// expression (or just declaring a Client variable) is unsafe
-// and can lead to panic.
-type Notificator struct {
- w NotificationWriter
- ns NotificationSource
- l *logger.Logger
-}
-
-// New creates, initializes and returns the Notificator instance.
-//
-// Panics if any field of the passed Prm structure is not set/set
-// to nil.
-func New(prm *Prm) *Notificator {
- panicOnNil := func(v any, name string) {
- if v == nil {
- panic(fmt.Sprintf("Notificator constructor: %s is nil\n", name))
- }
- }
-
- panicOnNil(prm.writer, "NotificationWriter")
- panicOnNil(prm.notificationSource, "NotificationSource")
- panicOnNil(prm.logger, "Logger")
-
- return &Notificator{
- w: prm.writer,
- ns: prm.notificationSource,
- l: prm.logger,
- }
-}
-
-// ProcessEpoch looks for all objects with defined epoch in the storage
-// and passes their addresses to the NotificationWriter.
-func (n *Notificator) ProcessEpoch(epoch uint64) {
- logger := n.l.With(zap.Uint64("epoch", epoch))
- logger.Debug("notificator: start processing object notifications")
-
- n.ns.Iterate(epoch, func(topic string, addr oid.Address) {
- n.l.Debug("notificator: processing object notification",
- zap.String("topic", topic),
- zap.Stringer("address", addr),
- )
-
- n.w.Notify(topic, addr)
- })
-}
diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go
deleted file mode 100644
index 6734f9f747..0000000000
--- a/pkg/services/object/acl/acl.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package acl
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2"
- v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
- bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// CheckerPrm groups parameters for Checker
-// constructor.
-type CheckerPrm struct {
- eaclSrc container.EACLSource
- validator *eaclSDK.Validator
- localStorage *engine.StorageEngine
- state netmap.State
-}
-
-func (c *CheckerPrm) SetEACLSource(v container.EACLSource) *CheckerPrm {
- c.eaclSrc = v
- return c
-}
-
-func (c *CheckerPrm) SetValidator(v *eaclSDK.Validator) *CheckerPrm {
- c.validator = v
- return c
-}
-
-func (c *CheckerPrm) SetLocalStorage(v *engine.StorageEngine) *CheckerPrm {
- c.localStorage = v
- return c
-}
-
-func (c *CheckerPrm) SetNetmapState(v netmap.State) *CheckerPrm {
- c.state = v
- return c
-}
-
-// Checker implements v2.ACLChecker interfaces and provides
-// ACL/eACL validation functionality.
-type Checker struct {
- eaclSrc container.EACLSource
- validator *eaclSDK.Validator
- localStorage *engine.StorageEngine
- state netmap.State
-}
-
-// Various EACL check errors.
-var (
- errEACLDeniedByRule = errors.New("denied by rule")
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
-
-// NewChecker creates Checker.
-// Panics if at least one of the parameter is nil.
-func NewChecker(prm *CheckerPrm) *Checker {
- panicOnNil := func(fieldName string, field any) {
- if field == nil {
- panic(fmt.Sprintf("incorrect field %s (%T): %v", fieldName, field, field))
- }
- }
-
- panicOnNil("EACLSource", prm.eaclSrc)
- panicOnNil("EACLValidator", prm.validator)
- panicOnNil("LocalStorageEngine", prm.localStorage)
- panicOnNil("NetmapState", prm.state)
-
- return &Checker{
- eaclSrc: prm.eaclSrc,
- validator: prm.validator,
- localStorage: prm.localStorage,
- state: prm.state,
- }
-}
-
-// CheckBasicACL is a main check function for basic ACL.
-func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool {
- // check basic ACL permissions
- return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole())
-}
-
-// StickyBitCheck validates owner field in the request if sticky bit is enabled.
-func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool {
- // According to FrostFS specification sticky bit has no effect on system nodes
- // for correct intra-container work with objects (in particular, replication).
- if info.RequestRole() == acl.RoleContainer {
- return true
- }
-
- if !info.BasicACL().Sticky() {
- return true
- }
-
- if len(info.SenderKey()) == 0 {
- return false
- }
-
- requestSenderKey := unmarshalPublicKey(info.SenderKey())
-
- return isOwnerFromKey(owner, requestSenderKey)
-}
-
-// CheckEACL is a main check function for extended ACL.
-func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
- basicACL := reqInfo.BasicACL()
- if !basicACL.Extendable() {
- return nil
- }
-
- // if bearer token is not allowed, then ignore it
- if !basicACL.AllowedBearerRules(reqInfo.Operation()) {
- reqInfo.CleanBearer()
- }
-
- var table eaclSDK.Table
- cnr := reqInfo.ContainerID()
-
- bearerTok := reqInfo.Bearer()
- if bearerTok == nil {
- eaclInfo, err := c.eaclSrc.GetEACL(cnr)
- if err != nil {
- if client.IsErrEACLNotFound(err) {
- return nil
- }
- return err
- }
-
- table = *eaclInfo.Value
- } else {
- table = bearerTok.EACLTable()
- }
-
- // if bearer token is not present, isValidBearer returns true
- if err := isValidBearer(reqInfo, c.state); err != nil {
- return err
- }
-
- hdrSrcOpts := make([]eaclV2.Option, 0, 3)
-
- hdrSrcOpts = append(hdrSrcOpts,
- eaclV2.WithLocalObjectStorage(c.localStorage),
- eaclV2.WithCID(cnr),
- eaclV2.WithOID(reqInfo.ObjectID()),
- )
-
- if req, ok := msg.(eaclV2.Request); ok {
- hdrSrcOpts = append(hdrSrcOpts, eaclV2.WithServiceRequest(req))
- } else {
- hdrSrcOpts = append(hdrSrcOpts,
- eaclV2.WithServiceResponse(
- msg.(eaclV2.Response),
- reqInfo.Request().(eaclV2.Request),
- ),
- )
- }
-
- hdrSrc, err := eaclV2.NewMessageHeaderSource(hdrSrcOpts...)
- if err != nil {
- return fmt.Errorf("can't parse headers: %w", err)
- }
-
- var eaclRole eaclSDK.Role
- switch op := reqInfo.RequestRole(); op {
- default:
- eaclRole = eaclSDK.Role(op)
- case acl.RoleOwner:
- eaclRole = eaclSDK.RoleUser
- case acl.RoleInnerRing, acl.RoleContainer:
- eaclRole = eaclSDK.RoleSystem
- case acl.RoleOthers:
- eaclRole = eaclSDK.RoleOthers
- }
-
- action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
- WithRole(eaclRole).
- WithOperation(eaclSDK.Operation(reqInfo.Operation())).
- WithContainerID(&cnr).
- WithSenderKey(reqInfo.SenderKey()).
- WithHeaderSource(hdrSrc).
- WithEACLTable(&table),
- )
-
- if action != eaclSDK.ActionAllow {
- return errEACLDeniedByRule
- }
- return nil
-}
-
-// isValidBearer checks whether bearer token was correctly signed by authorized
-// entity. This method might be defined on whole ACL service because it will
-// require fetching current epoch to check lifetime.
-func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error {
- ownerCnr := reqInfo.ContainerOwner()
-
- token := reqInfo.Bearer()
-
- // 0. Check if bearer token is present in reqInfo.
- if token == nil {
- return nil
- }
-
- // 1. First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // 2. Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // 3. Then check if container is either empty or equal to the container in the request.
- cnr, isSet := token.EACLTable().CID()
- if isSet && !cnr.Equals(reqInfo.ContainerID()) {
- return errBearerInvalidContainerID
- }
-
- // 4. Then check if container owner signed this token.
- if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) {
- // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
- return errBearerNotSignedByOwner
- }
-
- // 5. Then check if request sender has rights to use this token.
- var keySender frostfsecdsa.PublicKey
-
- err := keySender.Decode(reqInfo.SenderKey())
- if err != nil {
- return fmt.Errorf("decode sender public key: %w", err)
- }
-
- var usrSender user.ID
- user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender))
-
- if !token.AssertUser(usrSender) {
- // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
- return errBearerInvalidOwner
- }
-
- return nil
-}
-
-func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
- if key == nil {
- return false
- }
-
- var id2 user.ID
- user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
-
- return id.Equals(id2)
-}
-
-func unmarshalPublicKey(bs []byte) *keys.PublicKey {
- pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256())
- if err != nil {
- return nil
- }
- return pub
-}
diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go
deleted file mode 100644
index d3ad1e6fd5..0000000000
--- a/pkg/services/object/acl/acl_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package acl
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "github.com/stretchr/testify/require"
-)
-
-type emptyEACLSource struct{}
-
-func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) {
- return nil, nil
-}
-
-type emptyNetmapState struct{}
-
-func (e emptyNetmapState) CurrentEpoch() uint64 {
- return 0
-}
-
-func TestStickyCheck(t *testing.T) {
- checker := NewChecker(new(CheckerPrm).
- SetLocalStorage(&engine.StorageEngine{}).
- SetValidator(eaclSDK.NewValidator()).
- SetEACLSource(emptyEACLSource{}).
- SetNetmapState(emptyNetmapState{}),
- )
-
- t.Run("system role", func(t *testing.T) {
- var info v2.RequestInfo
-
- info.SetSenderKey(make([]byte, 33)) // any non-empty key
- info.SetRequestRole(acl.RoleContainer)
-
- require.True(t, checker.StickyBitCheck(info, *usertest.ID()))
-
- var basicACL acl.Basic
- basicACL.MakeSticky()
-
- info.SetBasicACL(basicACL)
-
- require.True(t, checker.StickyBitCheck(info, *usertest.ID()))
- })
-
- t.Run("owner ID and/or public key emptiness", func(t *testing.T) {
- var info v2.RequestInfo
-
- info.SetRequestRole(acl.RoleOthers) // should be non-system role
-
- assertFn := func(isSticky, withKey, withOwner, expected bool) {
- info := info
- if isSticky {
- var basicACL acl.Basic
- basicACL.MakeSticky()
-
- info.SetBasicACL(basicACL)
- }
-
- if withKey {
- info.SetSenderKey(make([]byte, 33))
- } else {
- info.SetSenderKey(nil)
- }
-
- var ownerID user.ID
-
- if withOwner {
- ownerID = *usertest.ID()
- }
-
- require.Equal(t, expected, checker.StickyBitCheck(info, ownerID))
- }
-
- assertFn(true, false, false, false)
- assertFn(true, true, false, false)
- assertFn(true, false, true, false)
- assertFn(false, false, false, true)
- assertFn(false, true, false, true)
- assertFn(false, false, true, true)
- assertFn(false, true, true, true)
- })
-}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
deleted file mode 100644
index 4570e271a9..0000000000
--- a/pkg/services/object/acl/eacl/v2/eacl_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package v2
-
-import (
- "crypto/ecdsa"
- "errors"
- "testing"
-
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-type testLocalStorage struct {
- t *testing.T
-
- expAddr oid.Address
-
- obj *object.Object
-
- err error
-}
-
-func (s *testLocalStorage) Head(addr oid.Address) (*object.Object, error) {
- require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
- require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
-
- return s.obj, s.err
-}
-
-func testXHeaders(strs ...string) []session.XHeader {
- res := make([]session.XHeader, len(strs)/2)
-
- for i := 0; i < len(strs); i += 2 {
- res[i/2].SetKey(strs[i])
- res[i/2].SetValue(strs[i+1])
- }
-
- return res
-}
-
-func TestHeadRequest(t *testing.T) {
- req := new(objectV2.HeadRequest)
-
- meta := new(session.RequestMetaHeader)
- req.SetMetaHeader(meta)
-
- body := new(objectV2.HeadRequestBody)
- req.SetBody(body)
-
- addr := oidtest.Address()
-
- var addrV2 refs.Address
- addr.WriteToV2(&addrV2)
-
- body.SetAddress(&addrV2)
-
- xKey := "x-key"
- xVal := "x-val"
- xHdrs := testXHeaders(
- xKey, xVal,
- )
-
- meta.SetXHeaders(xHdrs)
-
- obj := object.New()
-
- attrKey := "attr_key"
- attrVal := "attr_val"
- var attr object.Attribute
- attr.SetKey(attrKey)
- attr.SetValue(attrVal)
- obj.SetAttributes(attr)
-
- table := new(eaclSDK.Table)
-
- priv, err := keys.NewPrivateKey()
- require.NoError(t, err)
- senderKey := priv.PublicKey()
-
- r := eaclSDK.NewRecord()
- r.SetOperation(eaclSDK.OperationHead)
- r.SetAction(eaclSDK.ActionDeny)
- r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
- r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
- eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
-
- table.AddRecord(r)
-
- lStorage := &testLocalStorage{
- t: t,
- expAddr: addr,
- obj: obj,
- }
-
- id := addr.Object()
-
- newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
- hdrSrc, err := NewMessageHeaderSource(
- WithObjectStorage(lStorage),
- WithServiceRequest(req),
- WithCID(addr.Container()),
- WithOID(&id))
- require.NoError(t, err)
- return hdrSrc
- }
-
- cnr := addr.Container()
-
- unit := new(eaclSDK.ValidationUnit).
- WithContainerID(&cnr).
- WithOperation(eaclSDK.OperationHead).
- WithSenderKey(senderKey.Bytes()).
- WithEACLTable(table)
-
- validator := eaclSDK.NewValidator()
-
- checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
-
- meta.SetXHeaders(nil)
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- meta.SetXHeaders(xHdrs)
-
- obj.SetAttributes()
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- lStorage.err = errors.New("any error")
-
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-
- r.SetAction(eaclSDK.ActionAllow)
-
- rID := eaclSDK.NewRecord()
- rID.SetOperation(eaclSDK.OperationHead)
- rID.SetAction(eaclSDK.ActionDeny)
- rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
- eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
-
- table = eaclSDK.NewTable()
- table.AddRecord(r)
- table.AddRecord(rID)
-
- unit.WithEACLTable(table)
- checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
-}
-
-func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
- actual, fromRule := v.CalculateAction(u)
- require.True(t, fromRule)
- require.Equal(t, expected, actual)
-}
-
-func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
- actual, fromRule := v.CalculateAction(u)
- require.False(t, fromRule)
- require.Equal(t, eaclSDK.ActionAllow, actual)
-}
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
deleted file mode 100644
index 31188fbfd8..0000000000
--- a/pkg/services/object/acl/eacl/v2/headers.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package v2
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-type Option func(*cfg)
-
-type cfg struct {
- storage ObjectStorage
-
- msg xHeaderSource
-
- cnr cid.ID
- obj *oid.ID
-}
-
-type ObjectStorage interface {
- Head(oid.Address) (*object.Object, error)
-}
-
-type Request interface {
- GetMetaHeader() *session.RequestMetaHeader
-}
-
-type Response interface {
- GetMetaHeader() *session.ResponseMetaHeader
-}
-
-type headerSource struct {
- requestHeaders []eaclSDK.Header
- objectHeaders []eaclSDK.Header
-
- incompleteObjectHeaders bool
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- storage: new(localStorage),
- }
-}
-
-func NewMessageHeaderSource(opts ...Option) (eaclSDK.TypedHeaderSource, error) {
- cfg := defaultCfg()
-
- for i := range opts {
- opts[i](cfg)
- }
-
- if cfg.msg == nil {
- return nil, errors.New("message is not provided")
- }
-
- var res headerSource
-
- err := cfg.readObjectHeaders(&res)
- if err != nil {
- return nil, err
- }
-
- res.requestHeaders = requestHeaders(cfg.msg)
-
- return res, nil
-}
-
-func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
- switch typ {
- default:
- return nil, true
- case eaclSDK.HeaderFromRequest:
- return h.requestHeaders, true
- case eaclSDK.HeaderFromObject:
- return h.objectHeaders, !h.incompleteObjectHeaders
- }
-}
-
-type xHeader session.XHeader
-
-func (x xHeader) Key() string {
- return (*session.XHeader)(&x).GetKey()
-}
-
-func (x xHeader) Value() string {
- return (*session.XHeader)(&x).GetValue()
-}
-
-func requestHeaders(msg xHeaderSource) []eaclSDK.Header {
- return msg.GetXHeaders()
-}
-
-var errMissingOID = errors.New("object ID is missing")
-
-func (h *cfg) readObjectHeaders(dst *headerSource) error {
- switch m := h.msg.(type) {
- default:
- panic(fmt.Sprintf("unexpected message type %T", h.msg))
- case requestXHeaderSource:
- switch req := m.req.(type) {
- case
- *objectV2.GetRequest,
- *objectV2.HeadRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
-
- dst.objectHeaders = objHeaders
- dst.incompleteObjectHeaders = !completed
- case
- *objectV2.GetRangeRequest,
- *objectV2.GetRangeHashRequest,
- *objectV2.DeleteRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- dst.objectHeaders = addressHeaders(h.cnr, h.obj)
- case *objectV2.PutRequest:
- if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
-
- dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.SearchRequest:
- cnrV2 := req.GetBody().GetContainerID()
- var cnr cid.ID
-
- if cnrV2 != nil {
- if err := cnr.ReadFromV2(*cnrV2); err != nil {
- return fmt.Errorf("can't parse container ID: %w", err)
- }
- }
-
- dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
- }
- case responseXHeaderSource:
- switch resp := m.resp.(type) {
- default:
- objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
-
- dst.objectHeaders = objectHeaders
- dst.incompleteObjectHeaders = !completed
- case *objectV2.GetResponse:
- if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
-
- dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.HeadResponse:
- oV2 := new(objectV2.Object)
-
- var hdr *objectV2.Header
-
- switch v := resp.GetBody().GetHeaderPart().(type) {
- case *objectV2.ShortHeader:
- hdr = new(objectV2.Header)
-
- var idV2 refsV2.ContainerID
- h.cnr.WriteToV2(&idV2)
-
- hdr.SetContainerID(&idV2)
- hdr.SetVersion(v.GetVersion())
- hdr.SetCreationEpoch(v.GetCreationEpoch())
- hdr.SetOwnerID(v.GetOwnerID())
- hdr.SetObjectType(v.GetObjectType())
- hdr.SetPayloadLength(v.GetPayloadLength())
- case *objectV2.HeaderWithSignature:
- hdr = v.GetHeader()
- }
-
- oV2.SetHeader(hdr)
-
- dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
- }
- }
-
- return nil
-}
-
-func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
- if idObj != nil {
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(*idObj)
-
- obj, err := h.storage.Head(addr)
- if err == nil {
- return headersFromObject(obj, cnr, idObj), true
- }
- }
-
- return addressHeaders(cnr, idObj), false
-}
-
-func cidHeader(idCnr cid.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectContainerID,
- v: idCnr.EncodeToString(),
- }
-}
-
-func oidHeader(obj oid.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectID,
- v: obj.EncodeToString(),
- }
-}
-
-func ownerIDHeader(ownerID user.ID) sysObjHdr {
- return sysObjHdr{
- k: acl.FilterObjectOwnerID,
- v: ownerID.EncodeToString(),
- }
-}
-
-func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
- hh := make([]eaclSDK.Header, 0, 2)
- hh = append(hh, cidHeader(cnr))
-
- if oid != nil {
- hh = append(hh, oidHeader(*oid))
- }
-
- return hh
-}
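
For reference, the deleted constructor follows the functional-options pattern: a caller assembled a per-request header source and then queried it by header type during eACL evaluation. A minimal sketch of such a call site, assuming package-internal use; the helper name and the storage/request values are hypothetical, and the options are the ones defined here and in opts.go later in this diff:

// buildHeaderSource is a hypothetical helper showing how the deleted
// constructor was combined with its options.
func buildHeaderSource(storage ObjectStorage, req Request, cnr cid.ID, obj *oid.ID) (eaclSDK.TypedHeaderSource, error) {
    return NewMessageHeaderSource(
        WithObjectStorage(storage), // where object headers are looked up
        WithServiceRequest(req),    // the message whose X-headers are read
        WithCID(cnr),
        WithOID(obj),
    )
}
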
diff --git a/pkg/services/object/acl/eacl/v2/localstore.go b/pkg/services/object/acl/eacl/v2/localstore.go
deleted file mode 100644
index 40271f1cdc..0000000000
--- a/pkg/services/object/acl/eacl/v2/localstore.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package v2
-
-import (
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type localStorage struct {
- ls *engine.StorageEngine
-}
-
-func (s *localStorage) Head(addr oid.Address) (*objectSDK.Object, error) {
- if s.ls == nil {
- return nil, io.ErrUnexpectedEOF
- }
-
- return engine.Head(s.ls, addr)
-}
diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go
deleted file mode 100644
index 0a63981cb4..0000000000
--- a/pkg/services/object/acl/eacl/v2/object.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package v2
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type sysObjHdr struct {
- k, v string
-}
-
-func (s sysObjHdr) Key() string {
- return s.k
-}
-
-func (s sysObjHdr) Value() string {
- return s.v
-}
-
-func u64Value(v uint64) string {
- return strconv.FormatUint(v, 10)
-}
-
-func headersFromObject(obj *object.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
- var count int
- for obj := obj; obj != nil; obj = obj.Parent() {
- count += 9 + len(obj.Attributes())
- }
-
- res := make([]eaclSDK.Header, 0, count)
- for ; obj != nil; obj = obj.Parent() {
- res = append(res,
- cidHeader(cnr),
- // creation epoch
- sysObjHdr{
- k: acl.FilterObjectCreationEpoch,
- v: u64Value(obj.CreationEpoch()),
- },
- // payload size
- sysObjHdr{
- k: acl.FilterObjectPayloadLength,
- v: u64Value(obj.PayloadSize()),
- },
- // object version
- sysObjHdr{
- k: acl.FilterObjectVersion,
- v: obj.Version().String(),
- },
- // object type
- sysObjHdr{
- k: acl.FilterObjectType,
- v: obj.Type().String(),
- },
- )
-
- if oid != nil {
- res = append(res, oidHeader(*oid))
- }
-
- if idOwner := obj.OwnerID(); idOwner != nil {
- res = append(res, ownerIDHeader(*idOwner))
- }
-
- cs, ok := obj.PayloadChecksum()
- if ok {
- res = append(res, sysObjHdr{
- k: acl.FilterObjectPayloadHash,
- v: cs.String(),
- })
- }
-
- cs, ok = obj.PayloadHomomorphicHash()
- if ok {
- res = append(res, sysObjHdr{
- k: acl.FilterObjectHomomorphicHash,
- v: cs.String(),
- })
- }
-
- attrs := obj.Attributes()
- for i := range attrs {
- res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface
- }
- }
-
- return res
-}
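
The capacity hint of 9 per object in headersFromObject above counts the system headers the loop can append for each object in the parent chain: five unconditionally, plus up to four conditionally. Enumerated with the filter keys used in this file:

// Up to nine system headers per object, matching the "9" capacity hint
// (attribute headers are counted separately via len(obj.Attributes())):
var perObjectSystemHeaders = []string{
    acl.FilterObjectContainerID,     // always appended
    acl.FilterObjectCreationEpoch,   // always appended
    acl.FilterObjectPayloadLength,   // always appended
    acl.FilterObjectVersion,         // always appended
    acl.FilterObjectType,            // always appended
    acl.FilterObjectID,              // only if the object ID is known
    acl.FilterObjectOwnerID,         // only if the owner is set
    acl.FilterObjectPayloadHash,     // only if the payload checksum is set
    acl.FilterObjectHomomorphicHash, // only if the homomorphic hash is set
}
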
diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go
deleted file mode 100644
index 7657e87807..0000000000
--- a/pkg/services/object/acl/eacl/v2/opts.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func WithObjectStorage(v ObjectStorage) Option {
- return func(c *cfg) {
- c.storage = v
- }
-}
-
-func WithLocalObjectStorage(v *engine.StorageEngine) Option {
- return func(c *cfg) {
- c.storage = &localStorage{
- ls: v,
- }
- }
-}
-
-func WithServiceRequest(v Request) Option {
- return func(c *cfg) {
- c.msg = requestXHeaderSource{
- req: v,
- }
- }
-}
-
-func WithServiceResponse(resp Response, req Request) Option {
- return func(c *cfg) {
- c.msg = responseXHeaderSource{
- resp: resp,
- req: req,
- }
- }
-}
-
-func WithCID(v cid.ID) Option {
- return func(c *cfg) {
- c.cnr = v
- }
-}
-
-func WithOID(v *oid.ID) Option {
- return func(c *cfg) {
- c.obj = v
- }
-}
diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go
deleted file mode 100644
index 246714af7c..0000000000
--- a/pkg/services/object/acl/eacl/v2/xheader.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-)
-
-type xHeaderSource interface {
- GetXHeaders() []eaclSDK.Header
-}
-
-type requestXHeaderSource struct {
- req Request
-}
-
-type responseXHeaderSource struct {
- resp Response
-
- req Request
-}
-
-func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
- ln := 0
-
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- ln += len(meta.GetXHeaders())
- }
-
- res := make([]eaclSDK.Header, 0, ln)
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- x := meta.GetXHeaders()
- for i := range x {
- res = append(res, (xHeader)(x[i]))
- }
- }
-
- return res
-}
-
-func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
- ln := 0
- xHdrs := make([][]session.XHeader, 0)
-
- for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
- x := meta.GetXHeaders()
-
- ln += len(x)
-
- xHdrs = append(xHdrs, x)
- }
-
- res := make([]eaclSDK.Header, 0, ln)
-
- for i := range xHdrs {
- for j := range xHdrs[i] {
- res = append(res, xHeader(xHdrs[i][j]))
- }
- }
-
- return res
-}
diff --git a/pkg/services/object/acl/v2/classifier.go b/pkg/services/object/acl/v2/classifier.go
deleted file mode 100644
index 2bf5a39589..0000000000
--- a/pkg/services/object/acl/v2/classifier.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package v2
-
-import (
- "bytes"
- "crypto/sha256"
-
- core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "go.uber.org/zap"
-)
-
-type senderClassifier struct {
- log *logger.Logger
- innerRing InnerRingFetcher
- netmap core.Source
-}
-
-type classifyResult struct {
- role acl.Role
- key []byte
-}
-
-func (c senderClassifier) classify(
- req MetaWithToken,
- idCnr cid.ID,
- cnr container.Container) (res *classifyResult, err error) {
- ownerID, ownerKey, err := req.RequestOwner()
- if err != nil {
- return nil, err
- }
-
- ownerKeyInBytes := ownerKey.Bytes()
-
- // TODO: #767 get owner from frostfs.id if present
-
- // if request owner is the same as container owner, return RoleOwner
- if ownerID.Equals(cnr.Owner()) {
- return &classifyResult{
- role: acl.RoleOwner,
- key: ownerKeyInBytes,
- }, nil
- }
-
- isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
- if err != nil {
- // do not throw error, try best case matching
- c.log.Debug("can't check if request from inner ring",
- zap.String("error", err.Error()))
- } else if isInnerRingNode {
- return &classifyResult{
- role: acl.RoleInnerRing,
- key: ownerKeyInBytes,
- }, nil
- }
-
- binCnr := make([]byte, sha256.Size)
- idCnr.Encode(binCnr)
-
- isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr)
- if err != nil {
- // an error might happen if the request has a `RoleOthers` key and placement
- // is not possible for the previous epoch, so
- // do not propagate the error, try best-effort matching
- c.log.Debug("can't check if request from container node",
- zap.String("error", err.Error()))
- } else if isContainerNode {
- return &classifyResult{
- role: acl.RoleContainer,
- key: ownerKeyInBytes,
- }, nil
- }
-
- // if none of above, return RoleOthers
- return &classifyResult{
- role: acl.RoleOthers,
- key: ownerKeyInBytes,
- }, nil
-}
-
-func (c senderClassifier) isInnerRingKey(owner []byte) (bool, error) {
- innerRingKeys, err := c.innerRing.InnerRingKeys()
- if err != nil {
- return false, err
- }
-
- // if the request owner key is in the inner ring list, the sender is classified as RoleInnerRing
- for i := range innerRingKeys {
- if bytes.Equal(innerRingKeys[i], owner) {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-func (c senderClassifier) isContainerKey(
- owner, idCnr []byte,
- cnr container.Container) (bool, error) {
- nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
- if err != nil {
- return false, err
- }
-
- in, err := lookupKeyInContainer(nm, owner, idCnr, cnr)
- if err != nil {
- return false, err
- } else if in {
- return true, nil
- }
-
- // then check previous netmap, this can happen in-between epoch change
- // when node migrates data from last epoch container
- nm, err = core.GetPreviousNetworkMap(c.netmap)
- if err != nil {
- return false, err
- }
-
- return lookupKeyInContainer(nm, owner, idCnr, cnr)
-}
-
-func lookupKeyInContainer(
- nm *netmap.NetMap,
- owner, idCnr []byte,
- cnr container.Container) (bool, error) {
- cnrVectors, err := nm.ContainerNodes(cnr.PlacementPolicy(), idCnr)
- if err != nil {
- return false, err
- }
-
- for i := range cnrVectors {
- for j := range cnrVectors[i] {
- if bytes.Equal(cnrVectors[i][j].PublicKey(), owner) {
- return true, nil
- }
- }
- }
-
- return false, nil
-}
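
The deleted classifier resolves the sender role with a fixed precedence, and membership checks that fail with an error are logged and treated as non-matches rather than aborting classification. A compressed restatement of that order (a sketch, not the removed code):

func roleOf(isOwner, isInnerRing, isContainerNode bool) acl.Role {
    switch {
    case isOwner: // request owner equals container owner
        return acl.RoleOwner
    case isInnerRing: // sender key listed among inner ring keys
        return acl.RoleInnerRing
    case isContainerNode: // sender key in current or previous netmap placement
        return acl.RoleContainer
    default: // none of the above
        return acl.RoleOthers
    }
}
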
diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go
deleted file mode 100644
index 70c1ae9914..0000000000
--- a/pkg/services/object/acl/v2/errors.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package v2
-
-import (
- "fmt"
-
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-const invalidRequestMessage = "malformed request"
-
-func malformedRequestError(reason string) error {
- return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
-}
-
-var (
- errEmptyBody = malformedRequestError("empty body")
- errEmptyVerificationHeader = malformedRequestError("empty verification header")
- errEmptyBodySig = malformedRequestError("empty body signature")
- errInvalidSessionSig = malformedRequestError("invalid session token signature")
- errInvalidSessionOwner = malformedRequestError("invalid session token owner")
- errInvalidVerb = malformedRequestError("session token verb is invalid")
-)
-
-const accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check"
-const accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v"
-
-func basicACLErr(info RequestInfo) error {
- var errAccessDenied apistatus.ObjectAccessDenied
- errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation))
-
- return errAccessDenied
-}
-
-func eACLErr(info RequestInfo, err error) error {
- var errAccessDenied apistatus.ObjectAccessDenied
- errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err))
-
- return errAccessDenied
-}
diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go
deleted file mode 100644
index 7e937da069..0000000000
--- a/pkg/services/object/acl/v2/opts.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// WithLogger returns option to set logger.
-func WithLogger(v *logger.Logger) Option {
- return func(c *cfg) {
- c.log = v
- }
-}
-
- // WithNetmapSource returns option to set netmap source.
-func WithNetmapSource(v netmap.Source) Option {
- return func(c *cfg) {
- c.nm = v
- }
-}
-
-// WithContainerSource returns option to set container source.
-func WithContainerSource(v container.Source) Option {
- return func(c *cfg) {
- c.containers = v
- }
-}
-
-// WithNextService returns option to set next object service.
-func WithNextService(v objectSvc.ServiceServer) Option {
- return func(c *cfg) {
- c.next = v
- }
-}
-
-// WithEACLChecker returns option to set eACL checker.
-func WithEACLChecker(v ACLChecker) Option {
- return func(c *cfg) {
- c.checker = v
- }
-}
-
-// WithIRFetcher returns option to set inner ring fetcher.
-func WithIRFetcher(v InnerRingFetcher) Option {
- return func(c *cfg) {
- c.irFetcher = v
- }
-}
diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go
deleted file mode 100644
index 0cf734d7ab..0000000000
--- a/pkg/services/object/acl/v2/request.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package v2
-
-import (
- "crypto/ecdsa"
- "fmt"
-
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// RequestInfo groups parsed, version-independent request information
-// (from the SDK library) and the raw API request.
-type RequestInfo struct {
- basicACL acl.Basic
- requestRole acl.Role
- operation acl.Op // put, get, head, etc.
- cnrOwner user.ID // container owner
-
- idCnr cid.ID
-
- // optional for some request
- // e.g. Put, Search
- obj *oid.ID
-
- senderKey []byte
-
- bearer *bearer.Token // bearer token of request
-
- srcRequest any
-}
-
-func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
- r.basicACL = basicACL
-}
-
-func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
- r.requestRole = requestRole
-}
-
-func (r *RequestInfo) SetSenderKey(senderKey []byte) {
- r.senderKey = senderKey
-}
-
-// Request returns raw API request.
-func (r RequestInfo) Request() any {
- return r.srcRequest
-}
-
-// ContainerOwner returns the owner of the container.
-func (r RequestInfo) ContainerOwner() user.ID {
- return r.cnrOwner
-}
-
-// ObjectID returns the object ID.
-func (r RequestInfo) ObjectID() *oid.ID {
- return r.obj
-}
-
-// ContainerID returns the container ID.
-func (r RequestInfo) ContainerID() cid.ID {
- return r.idCnr
-}
-
-// CleanBearer forces cleaning bearer token information.
-func (r *RequestInfo) CleanBearer() {
- r.bearer = nil
-}
-
-// Bearer returns bearer token of the request.
-func (r RequestInfo) Bearer() *bearer.Token {
- return r.bearer
-}
-
-// BasicACL returns basic ACL of the container.
-func (r RequestInfo) BasicACL() acl.Basic {
- return r.basicACL
-}
-
-// SenderKey returns public key of the request's sender.
-func (r RequestInfo) SenderKey() []byte {
- return r.senderKey
-}
-
-// Operation returns request's operation.
-func (r RequestInfo) Operation() acl.Op {
- return r.operation
-}
-
-// RequestRole returns request sender's role.
-func (r RequestInfo) RequestRole() acl.Role {
- return r.requestRole
-}
-
-// MetaWithToken groups session and bearer tokens,
-// verification header and raw API request.
-type MetaWithToken struct {
- vheader *sessionV2.RequestVerificationHeader
- token *sessionSDK.Object
- bearer *bearer.Token
- src any
-}
-
-// RequestOwner returns ownerID and its public key
-// according to internal meta information.
-func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
- if r.vheader == nil {
- return nil, nil, errEmptyVerificationHeader
- }
-
- // if session token is presented, use it as truth source
- if r.token != nil {
- // verify signature of session token
- return ownerFromToken(r.token)
- }
-
- // otherwise get original body signature
- bodySignature := originalBodySignature(r.vheader)
- if bodySignature == nil {
- return nil, nil, errEmptyBodySig
- }
-
- key, err := unmarshalPublicKey(bodySignature.GetKey())
- if err != nil {
- return nil, nil, fmt.Errorf("invalid key in body signature: %w", err)
- }
-
- var idSender user.ID
- user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
-
- return &idSender, key, nil
-}
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
deleted file mode 100644
index 6bae0d0caa..0000000000
--- a/pkg/services/object/acl/v2/service.go
+++ /dev/null
@@ -1,612 +0,0 @@
-package v2
-
-import (
- "context"
- "errors"
- "fmt"
-
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "go.uber.org/zap"
-)
-
-// Service checks basic ACL rules.
-type Service struct {
- *cfg
-
- c senderClassifier
-}
-
-type putStreamBasicChecker struct {
- source *Service
- next object.PutObjectStream
-}
-
-type getStreamBasicChecker struct {
- checker ACLChecker
-
- object.GetObjectStream
-
- info RequestInfo
-}
-
-type rangeStreamBasicChecker struct {
- checker ACLChecker
-
- object.GetObjectRangeStream
-
- info RequestInfo
-}
-
-type searchStreamBasicChecker struct {
- checker ACLChecker
-
- object.SearchStream
-
- info RequestInfo
-}
-
-// Option represents Service constructor option.
-type Option func(*cfg)
-
-type cfg struct {
- log *logger.Logger
-
- containers container.Source
-
- checker ACLChecker
-
- irFetcher InnerRingFetcher
-
- nm netmap.Source
-
- next object.ServiceServer
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// New is a constructor for object ACL checking service.
-func New(opts ...Option) Service {
- cfg := defaultCfg()
-
- for i := range opts {
- opts[i](cfg)
- }
-
- panicOnNil := func(v any, name string) {
- if v == nil {
- panic(fmt.Sprintf("ACL service: %s is nil", name))
- }
- }
-
- panicOnNil(cfg.next, "next Service")
- panicOnNil(cfg.nm, "netmap client")
- panicOnNil(cfg.irFetcher, "inner Ring fetcher")
- panicOnNil(cfg.checker, "acl checker")
- panicOnNil(cfg.containers, "container source")
-
- return Service{
- cfg: cfg,
- c: senderClassifier{
- log: cfg.log,
- innerRing: cfg.irFetcher,
- netmap: cfg.nm,
- },
- }
-}
-
-// Get implements ServiceServer interface, makes ACL checks and calls
-// next Get method in the ServiceServer pipeline.
-func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
-
- return b.next.Get(request, &getStreamBasicChecker{
- GetObjectStream: stream,
- info: reqInfo,
- checker: b.checker,
- })
-}
-
-func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) {
- streamer, err := b.next.Put(ctx)
-
- return putStreamBasicChecker{
- source: &b,
- next: streamer,
- }, err
-}
-
-func (b Service) Head(
- ctx context.Context,
- request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
-
- resp, err := b.next.Head(ctx, request)
- if err == nil {
- if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
- err = eACLErr(reqInfo, err)
- }
- }
-
- return resp, err
-}
-
-func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
- id, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, id, nil)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch)
- if err != nil {
- return err
- }
-
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
-
- return b.next.Search(request, &searchStreamBasicChecker{
- checker: b.checker,
- SearchStream: stream,
- info: reqInfo,
- })
-}
-
-func (b Service) Delete(
- ctx context.Context,
- request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
-
- return b.next.Delete(ctx, request)
-}
-
-func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if !b.checker.CheckBasicACL(reqInfo) {
- return basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
-
- return b.next.GetRange(request, &rangeStreamBasicChecker{
- checker: b.checker,
- GetObjectRangeStream: stream,
- info: reqInfo,
- })
-}
-
-func (b Service) GetRangeHash(
- ctx context.Context,
- request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return nil, err
- }
-
- obj, err := getObjectIDFromRequestBody(request.GetBody())
- if err != nil {
- return nil, err
- }
-
- sTok, err := originalSessionToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- if sTok != nil {
- err = assertSessionRelation(*sTok, cnr, obj)
- if err != nil {
- return nil, err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return nil, err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash)
- if err != nil {
- return nil, err
- }
-
- reqInfo.obj = obj
-
- if !b.checker.CheckBasicACL(reqInfo) {
- return nil, basicACLErr(reqInfo)
- } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
- return nil, eACLErr(reqInfo, err)
- }
-
- return b.next.GetRangeHash(ctx, request)
-}
-
-func (p putStreamBasicChecker) Send(request *objectV2.PutRequest) error {
- body := request.GetBody()
- if body == nil {
- return errEmptyBody
- }
-
- part := body.GetObjectPart()
- if part, ok := part.(*objectV2.PutObjectPartInit); ok {
- cnr, err := getContainerIDFromRequest(request)
- if err != nil {
- return err
- }
-
- idV2 := part.GetHeader().GetOwnerID()
- if idV2 == nil {
- return errors.New("missing object owner")
- }
-
- var idOwner user.ID
-
- err = idOwner.ReadFromV2(*idV2)
- if err != nil {
- return fmt.Errorf("invalid object owner: %w", err)
- }
-
- objV2 := part.GetObjectID()
- var obj *oid.ID
-
- if objV2 != nil {
- obj = new(oid.ID)
-
- err = obj.ReadFromV2(*objV2)
- if err != nil {
- return err
- }
- }
-
- var sTok *sessionSDK.Object
-
- if tokV2 := request.GetMetaHeader().GetSessionToken(); tokV2 != nil {
- sTok = new(sessionSDK.Object)
-
- err = sTok.ReadFromV2(*tokV2)
- if err != nil {
- return fmt.Errorf("invalid session token: %w", err)
- }
-
- if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
- // if session relates to object's removal, we don't check
- // relation of the tombstone to the session here since user
- // can't predict tomb's ID.
- err = assertSessionRelation(*sTok, cnr, nil)
- } else {
- err = assertSessionRelation(*sTok, cnr, obj)
- }
-
- if err != nil {
- return err
- }
- }
-
- bTok, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return err
- }
-
- req := MetaWithToken{
- vheader: request.GetVerificationHeader(),
- token: sTok,
- bearer: bTok,
- src: request,
- }
-
- reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut)
- if err != nil {
- return err
- }
-
- reqInfo.obj = obj
-
- if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) {
- return basicACLErr(reqInfo)
- } else if err := p.source.checker.CheckEACL(request, reqInfo); err != nil {
- return eACLErr(reqInfo, err)
- }
- }
-
- return p.next.Send(request)
-}
-
-func (p putStreamBasicChecker) CloseAndRecv() (*objectV2.PutResponse, error) {
- return p.next.CloseAndRecv()
-}
-
-func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
- if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
- }
-
- return g.GetObjectStream.Send(resp)
-}
-
-func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
-
- return g.GetObjectRangeStream.Send(resp)
-}
-
-func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error {
- if err := g.checker.CheckEACL(resp, g.info); err != nil {
- return eACLErr(g.info, err)
- }
-
- return g.SearchStream.Send(resp)
-}
-
-func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
- cnr, err := b.containers.Get(idCnr) // fetch actual container
- if err != nil {
- return info, err
- }
-
- if req.token != nil {
- currentEpoch, err := b.nm.Epoch()
- if err != nil {
- return info, errors.New("can't fetch current epoch")
- }
- if req.token.ExpiredAt(currentEpoch) {
- return info, apistatus.SessionTokenExpired{}
- }
- if req.token.InvalidAt(currentEpoch) {
- return info, fmt.Errorf("%s: token is invalid at epoch %d",
- invalidRequestMessage, currentEpoch)
- }
-
- if !assertVerb(*req.token, op) {
- return info, errInvalidVerb
- }
- }
-
- // find request role and key
- res, err := b.c.classify(req, idCnr, cnr.Value)
- if err != nil {
- return info, err
- }
-
- info.basicACL = cnr.Value.BasicACL()
- info.requestRole = res.role
- info.operation = op
- info.cnrOwner = cnr.Value.Owner()
- info.idCnr = idCnr
-
- // the key is assumed to be valid at this point,
- // otherwise the request would not have passed validation
- info.senderKey = res.key
-
- // add bearer token if it is present in request
- info.bearer = req.bearer
-
- info.srcRequest = req.src
-
- return info, nil
-}
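
Because New panics on any missing required dependency, the deleted service had to be assembled with the full option set from opts.go. A minimal wiring sketch; the dependency variables are assumed to come from node initialization:

svc := New(
    WithLogger(log),                // optional: defaults to zap.L()
    WithNetmapSource(netmapSource), // required
    WithContainerSource(cnrSource), // required
    WithNextService(nextService),   // required: next ServiceServer in the pipeline
    WithEACLChecker(aclChecker),    // required
    WithIRFetcher(irFetcher),       // required
)
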
diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go
deleted file mode 100644
index 061cd26b62..0000000000
--- a/pkg/services/object/acl/v2/types.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package v2
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-// ACLChecker is an interface that must provide
-// ACL related checks.
-type ACLChecker interface {
- // CheckBasicACL must return true only if request
- // passes basic ACL validation.
- CheckBasicACL(RequestInfo) bool
- // CheckEACL must return non-nil error if request
- // doesn't pass extended ACL validation.
- CheckEACL(any, RequestInfo) error
- // StickyBitCheck must return true only if sticky bit
- // is disabled or enabled but request contains correct
- // owner field.
- StickyBitCheck(RequestInfo, user.ID) bool
-}
-
-// InnerRingFetcher is an interface that must provide
-// Inner Ring information.
-type InnerRingFetcher interface {
- // InnerRingKeys must return list of public keys of
- // the actual inner ring.
- InnerRingKeys() ([][]byte, error)
-}
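
For tests or local experiments, the deleted ACLChecker interface could be satisfied by a trivial permissive stub (type name hypothetical):

type allowAllChecker struct{}

func (allowAllChecker) CheckBasicACL(RequestInfo) bool           { return true }
func (allowAllChecker) CheckEACL(any, RequestInfo) error         { return nil }
func (allowAllChecker) StickyBitCheck(RequestInfo, user.ID) bool { return true }
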
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
deleted file mode 100644
index 394feef4ef..0000000000
--- a/pkg/services/object/acl/v2/util_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package v2
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
- aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestOriginalTokens(t *testing.T) {
- sToken := sessiontest.ObjectSigned()
- bToken := bearertest.Token()
-
- pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- require.NoError(t, bToken.Sign(*pk))
-
- var bTokenV2 acl.BearerToken
- bToken.WriteToV2(&bTokenV2)
- // This line is needed because SDK uses some custom format for
- // reserved filters, so `cid.ID` is not converted to string immediately.
- require.NoError(t, bToken.ReadFromV2(bTokenV2))
-
- var sTokenV2 session.Token
- sToken.WriteToV2(&sTokenV2)
-
- for i := 0; i < 10; i++ {
- metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
- res, err := originalSessionToken(metaHeaders)
- require.NoError(t, err)
- require.Equal(t, sToken, res, i)
-
- bTok, err := originalBearerToken(metaHeaders)
- require.NoError(t, err)
- require.Equal(t, &bToken, bTok, i)
- }
-}
-
-func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
- metaHeader := new(session.RequestMetaHeader)
- metaHeader.SetBearerToken(b)
- metaHeader.SetSessionToken(s)
-
- for i := uint32(0); i < depth; i++ {
- link := metaHeader
- metaHeader = new(session.RequestMetaHeader)
- metaHeader.SetOrigin(link)
- }
-
- return metaHeader
-}
-
-func TestIsVerbCompatible(t *testing.T) {
- // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
- table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
- aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
- aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
- aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet},
- aclsdk.OpObjectHead: {
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- },
- aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
- aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash},
- aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
- }
-
- verbs := []sessionSDK.ObjectVerb{
- sessionSDK.VerbObjectPut,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectSearch,
- }
-
- var tok sessionSDK.Object
-
- for op, list := range table {
- for _, verb := range verbs {
- var contains bool
- for _, v := range list {
- if v == verb {
- contains = true
- break
- }
- }
-
- tok.ForVerb(verb)
-
- require.Equal(t, contains, assertVerb(tok, op),
- "%v in token, %s executing", verb, op)
- }
- }
-}
-
-func TestAssertSessionRelation(t *testing.T) {
- var tok sessionSDK.Object
- cnr := cidtest.ID()
- cnrOther := cidtest.ID()
- obj := oidtest.ID()
- objOther := oidtest.ID()
-
- // make sure ids differ, otherwise test won't work correctly
- require.False(t, cnrOther.Equals(cnr))
- require.False(t, objOther.Equals(obj))
-
- // bind session to the container (required)
- tok.BindContainer(cnr)
-
- // test container-global session
- require.NoError(t, assertSessionRelation(tok, cnr, nil))
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnrOther, nil))
- require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
-
- // limit the session to the particular object
- tok.LimitByObjects(obj)
-
- // test fixed object session (here obj arg must be non-nil everywhere)
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnr, &objOther))
-}
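
TestAssertSessionRelation above pins down the contract of assertSessionRelation (defined in util.go, which is outside this diff): the token must be bound to the container, and if the token is limited to particular objects, the object argument must be within that scope. A sketch of that contract, assuming the session SDK's AssertContainer/AssertObject helpers:

func sessionRelates(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) bool {
    if !tok.AssertContainer(cnr) {
        return false // token bound to a different container
    }
    // a container-global token accepts any object; an object-limited token
    // additionally requires the object to be in its scope
    return obj == nil || tok.AssertObject(*obj)
}
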
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
new file mode 100644
index 0000000000..bb6067a373
--- /dev/null
+++ b/pkg/services/object/ape/checker.go
@@ -0,0 +1,114 @@
+package ape
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+type checkerImpl struct {
+ checkerCore checkercore.CheckCore
+ frostFSIDClient frostfsidcore.SubjectProvider
+ headerProvider HeaderProvider
+ nm netmap.Source
+ cnrSource container.Source
+ nodePK []byte
+}
+
+func NewChecker(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, headerProvider HeaderProvider, frostFSIDClient frostfsidcore.SubjectProvider, nm netmap.Source, st netmap.State, cnrSource container.Source, nodePK []byte) Checker {
+ return &checkerImpl{
+ checkerCore: checkercore.New(localOverrideStorage, morphChainStorage, frostFSIDClient, st),
+ frostFSIDClient: frostFSIDClient,
+ headerProvider: headerProvider,
+ nm: nm,
+ cnrSource: cnrSource,
+ nodePK: nodePK,
+ }
+}
+
+type Prm struct {
+ Namespace string
+
+ Container cid.ID
+
+ // Object ID is omitted for some methods.
+ Object *oid.ID
+
+ // If Header is set, then object attributes and properties will be parsed from
+ // a request/response's header.
+ Header *objectV2.Header
+
+ // Method must be one of the method constants defined in the native schema.
+ Method string
+
+ // Role must be one of the role constants defined in the native schema.
+ Role string
+
+ // SenderKey is the sender's public key as a hex-encoded string.
+ SenderKey string
+
+ // ContainerOwner is the container owner's user ID.
+ ContainerOwner user.ID
+
+ // Attributes defined for the container.
+ ContainerAttributes map[string]string
+
+ // BearerToken is the request's bearer token, used to check APE overrides carried by the token.
+ BearerToken *bearer.Token
+
+ // XHeaders from the request.
+ XHeaders []session.XHeader
+}
+
+var errMissingOID = errors.New("object ID is not set")
+
+// CheckAPE prepares an APE request and checks whether it is permitted by policies.
+func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
+ // APE check is ignored for some inter-node requests.
+ switch prm.Role {
+ case nativeschema.PropertyValueContainerRoleContainer:
+ return nil
+ case nativeschema.PropertyValueContainerRoleIR:
+ switch prm.Method {
+ case nativeschema.MethodGetObject,
+ nativeschema.MethodHeadObject,
+ nativeschema.MethodSearchObject,
+ nativeschema.MethodRangeObject,
+ nativeschema.MethodHashObject:
+ return nil
+ default:
+ }
+ }
+
+ r, err := c.newAPERequest(ctx, prm)
+ if err != nil {
+ return fmt.Errorf("failed to create ape request: %w", err)
+ }
+ pub, err := keys.NewPublicKeyFromString(prm.SenderKey)
+ if err != nil {
+ return err
+ }
+
+ return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{
+ Request: r,
+ PublicKey: pub,
+ Namespace: prm.Namespace,
+ Container: prm.Container,
+ ContainerOwner: prm.ContainerOwner,
+ BearerToken: prm.BearerToken,
+ })
+}
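
A call-site sketch for CheckAPE: the caller extracts request-derived values and fills Prm. The surrounding variable names and the role string value are assumptions; note that per the switch above, the "Container" role (used by the tests below) short-circuits the check for inter-node traffic:

prm := Prm{
    Container:      cnrID,                        // container being accessed
    Object:         &objID,                       // may be nil for Search and Put
    Method:         nativeschema.MethodGetObject, // native-schema method constant
    Role:           "Others",                     // native-schema role string (assumed value)
    SenderKey:      senderHexKey,                 // hex-encoded sender public key
    ContainerOwner: owner,
}
if err := checker.CheckAPE(ctx, prm); err != nil {
    return err // denied by a policy chain, or an infrastructure failure
}
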
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
new file mode 100644
index 0000000000..97eb2b2d78
--- /dev/null
+++ b/pkg/services/object/ape/checker_test.go
@@ -0,0 +1,782 @@
+package ape
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+type headerProviderMock struct {
+ m map[oid.Address]*objectSDK.Object
+}
+
+var _ HeaderProvider = (*headerProviderMock)(nil)
+
+func (h *headerProviderMock) addHeader(c cid.ID, o oid.ID, header *objectSDK.Object) {
+ var addr oid.Address
+ addr.SetContainer(c)
+ addr.SetObject(o)
+ h.m[addr] = header
+}
+
+func (h *headerProviderMock) GetHeader(_ context.Context, c cid.ID, o oid.ID, _ bool) (*objectSDK.Object, error) {
+ var addr oid.Address
+ addr.SetContainer(c)
+ addr.SetObject(o)
+ obj, ok := h.m[addr]
+ if !ok {
+ return nil, fmt.Errorf("address not found")
+ }
+ return obj, nil
+}
+
+func newHeaderProviderMock() *headerProviderMock {
+ return &headerProviderMock{
+ m: make(map[oid.Address]*objectSDK.Object),
+ }
+}
+
+func newContainerIDSDK(t *testing.T, encodedCID string) cid.ID {
+ var cnr cid.ID
+ require.NoError(t, cnr.DecodeString(encodedCID))
+ return cnr
+}
+
+func newObjectIDSDK(t *testing.T, encodedOID *string) *oid.ID {
+ if encodedOID == nil {
+ return nil
+ }
+ obj := new(oid.ID)
+ require.NoError(t, obj.DecodeString(*encodedOID))
+ return obj
+}
+
+type headerObjectSDKParams struct {
+ majorVersion, minorVersion uint32
+ owner user.ID
+ epoch uint64
+ payloadSize uint64
+ typ objectSDK.Type
+ payloadChecksum checksum.Checksum
+ payloadHomomorphicHash checksum.Checksum
+ attributes []struct {
+ key string
+ val string
+ }
+}
+
+func stringPtr(s string) *string {
+ return &s
+}
+
+func newHeaderObjectSDK(cnr cid.ID, oid *oid.ID, headerObjSDK *headerObjectSDKParams) *objectSDK.Object {
+ objSDK := objectSDK.New()
+ objSDK.SetContainerID(cnr)
+ if oid != nil {
+ objSDK.SetID(*oid)
+ }
+ if headerObjSDK == nil {
+ return objSDK
+ }
+ ver := new(version.Version)
+ ver.SetMajor(headerObjSDK.majorVersion)
+ ver.SetMinor(headerObjSDK.minorVersion)
+ objSDK.SetVersion(ver)
+ objSDK.SetCreationEpoch(headerObjSDK.epoch)
+ objSDK.SetOwnerID(headerObjSDK.owner)
+ objSDK.SetPayloadSize(headerObjSDK.payloadSize)
+ objSDK.SetType(headerObjSDK.typ)
+ objSDK.SetPayloadChecksum(headerObjSDK.payloadChecksum)
+ objSDK.SetPayloadHomomorphicHash(headerObjSDK.payloadHomomorphicHash)
+
+ var attrs []objectSDK.Attribute
+ for _, attr := range headerObjSDK.attributes {
+ attrSDK := objectSDK.NewAttribute()
+ attrSDK.SetKey(attr.key)
+ attrSDK.SetValue(attr.val)
+ attrs = append(attrs, *attrSDK)
+ }
+ objSDK.SetAttributes(attrs...)
+
+ return objSDK
+}
+
+type testHeader struct {
+ headerObjSDK *headerObjectSDKParams
+
+ // If fromHeaderProvider is set, the running test should
+ // consider that the header is received from a header provider.
+ fromHeaderProvider bool
+
+ // If fromRequestResponseHeader is set, the running test should
+ // consider that the header is received from a request/response message header.
+ fromRequestResponseHeader bool
+}
+
+var (
+ methodsRequiredOID = []string{
+ nativeschema.MethodGetObject,
+ nativeschema.MethodHeadObject,
+ nativeschema.MethodRangeObject,
+ nativeschema.MethodHashObject,
+ nativeschema.MethodDeleteObject,
+ }
+
+ methodsOptionalOID = []string{
+ nativeschema.MethodSearchObject, nativeschema.MethodPutObject,
+ }
+
+ namespace = "test_namespace"
+
+ containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy"
+
+ objectID = "BzQw5HH3feoxFDD5tCT87Y1726qzgLfxEE7wgtoRzB3R"
+
+ groupID = "1"
+
+ role = "Container"
+
+ senderPrivateKey, _ = keys.NewPrivateKey()
+
+ senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
+)
+
+type frostfsIDProviderMock struct {
+ subjects map[util.Uint160]*client.Subject
+ subjectsExtended map[util.Uint160]*client.SubjectExtended
+}
+
+var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil)
+
+func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock {
+ return &frostfsIDProviderMock{
+ subjects: map[util.Uint160]*client.Subject{
+ scriptHashFromSenderKey(t, senderKey): {
+ Namespace: "testnamespace",
+ Name: "test",
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExtended: map[util.Uint160]*client.SubjectExtended{
+ scriptHashFromSenderKey(t, senderKey): {
+ Namespace: "testnamespace",
+ Name: "test",
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 1,
+ Name: "test",
+ Namespace: "testnamespace",
+ KV: map[string]string{
+ "attr1": "value1",
+ "attr2": "value2",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
+ pk, err := keys.NewPublicKeyFromString(senderKey)
+ require.NoError(t, err)
+ return pk.GetScriptHash()
+}
+
+func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
+ v, ok := f.subjects[key]
+ if !ok {
+ return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return v, nil
+}
+
+func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
+ v, ok := f.subjectsExtended[key]
+ if !ok {
+ return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return v, nil
+}
+
+var apeCheckTestCases = []struct {
+ name string
+ container string
+ object *string
+ methods []string
+ header testHeader
+ xHeaders []session.XHeader
+ containerRules []chain.Rule
+ groupidRules []chain.Rule
+ expectAPEErr bool
+}{
+ {
+ name: "oid required requests are allowed",
+ container: containerID,
+ object: stringPtr(objectID),
+ methods: methodsRequiredOID,
+ containerRules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
+ },
+ },
+ },
+ },
+ {
+ name: "oid optional requests are allowed",
+ container: containerID,
+ methods: methodsOptionalOID,
+ containerRules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: methodsOptionalOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ },
+ },
+ },
+ {
+ name: "oid required requests are denied",
+ container: containerID,
+ object: stringPtr(objectID),
+ methods: methodsRequiredOID,
+ containerRules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
+ },
+ },
+ },
+ expectAPEErr: true,
+ },
+ {
+ name: "oid required requests are denied by an attribute",
+ container: containerID,
+ object: stringPtr(objectID),
+ methods: methodsRequiredOID,
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ attributes: []struct {
+ key string
+ val string
+ }{
+ {
+ key: "attr1",
+ val: "attribute_value",
+ },
+ },
+ },
+ fromHeaderProvider: true,
+ },
+ containerRules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
+ },
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringLike,
+ Kind: chain.KindResource,
+ Key: "attr1",
+ Value: "attribute*",
+ },
+ },
+ },
+ },
+ expectAPEErr: true,
+ },
+ {
+ name: "oid required requests are denied by sender",
+ container: containerID,
+ object: stringPtr(objectID),
+ methods: methodsRequiredOID,
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ attributes: []struct {
+ key string
+ val string
+ }{
+ {
+ key: "attr1",
+ val: "attribute_value",
+ },
+ },
+ },
+ fromHeaderProvider: true,
+ },
+ containerRules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
+ },
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringLike,
+ Kind: chain.KindRequest,
+ Key: nativeschema.PropertyKeyActorPublicKey,
+ Value: senderKey,
+ },
+ },
+ },
+ },
+ expectAPEErr: true,
+ },
+ {
+ name: "oid required requests are denied by xheader",
+ container: containerID,
+ object: stringPtr(objectID),
+ methods: methodsRequiredOID,
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ attributes: []struct {
+ key string
+ val string
+ }{
+ {
+ key: "attr1",
+ val: "attribute_value",
+ },
+ },
+ },
+ fromHeaderProvider: true,
+ },
+ xHeaders: []session.XHeader{
+ func() (xhead session.XHeader) {
+ xhead.SetKey("X-Test-ID")
+ xhead.SetValue("aezakmi")
+ return
+ }(),
+ },
+ containerRules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
+ },
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringLike,
+ Kind: chain.KindRequest,
+ Key: fmt.Sprintf(commonschema.PropertyKeyFrostFSXHeader, "X-Test-ID"),
+ Value: "aezakmi",
+ },
+ },
+ },
+ },
+ expectAPEErr: true,
+ },
+ {
+ name: "optional oid requests reached quota limit by an attribute",
+ container: containerID,
+ methods: methodsOptionalOID,
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ payloadSize: 1000,
+ },
+ fromRequestResponseHeader: true,
+ },
+ containerRules: []chain.Rule{
+ {
+ Status: chain.QuotaLimitReached,
+ Actions: chain.Actions{Names: methodsOptionalOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.PropertyKeyObjectPayloadLength,
+ Value: "1000",
+ },
+ },
+ },
+ },
+ expectAPEErr: true,
+ },
+ {
+ name: "optional oid requests reached quota limit by group-id",
+ container: containerID,
+ methods: methodsOptionalOID,
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ payloadSize: 1000,
+ },
+ fromRequestResponseHeader: true,
+ },
+ groupidRules: []chain.Rule{
+ {
+ Status: chain.QuotaLimitReached,
+ Actions: chain.Actions{Names: methodsOptionalOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: commonschema.PropertyKeyFrostFSIDGroupID,
+ Value: groupID,
+ },
+ },
+ },
+ },
+ expectAPEErr: true,
+ },
+}
+
+type stMock struct{}
+
+func (m *stMock) CurrentEpoch() uint64 {
+ return 8
+}
+
+func TestAPECheck_BearerTokenOverrides(t *testing.T) {
+ for _, test := range apeCheckTestCases {
+ t.Run(test.name, func(t *testing.T) {
+ chain := chain.Chain{
+ Rules: test.containerRules,
+ MatchType: chain.MatchTypeFirstMatch,
+ }
+ chainSDK := apeSDK.Chain{
+ Raw: chain.Bytes(),
+ }
+ bt := new(bearer.Token)
+ bt.SetIat(1)
+ bt.SetExp(10)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: apeSDK.ChainTarget{
+ TargetType: apeSDK.TargetTypeContainer,
+ Name: test.container,
+ },
+ Chains: []apeSDK.Chain{chainSDK},
+ })
+ bt.Sign(senderPrivateKey.PrivateKey)
+ var cnrOwner user.ID
+ user.IDFromKey(&cnrOwner, (ecdsa.PublicKey)(*senderPrivateKey.PublicKey()))
+
+ for _, method := range test.methods {
+ t.Run(method, func(t *testing.T) {
+ headerProvider := newHeaderProviderMock()
+ frostfsidProvider := newFrostfsIDProviderMock(t)
+
+ cnr := newContainerIDSDK(t, test.container)
+ obj := newObjectIDSDK(t, test.object)
+
+ ls := inmemory.NewInmemoryLocalStorage()
+ ms := inmemory.NewInmemoryMorphRuleChainStorage()
+
+ checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil)
+
+ prm := Prm{
+ Method: method,
+ Container: cnr,
+ Object: obj,
+ Role: role,
+ ContainerOwner: cnrOwner,
+ SenderKey: senderKey,
+ BearerToken: bt,
+ }
+
+ var headerObjSDK *objectSDK.Object
+ if test.header.headerObjSDK != nil {
+ headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
+ if test.header.fromHeaderProvider {
+ require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
+ headerProvider.addHeader(cnr, *obj, headerObjSDK)
+ } else if test.header.fromRequestResponseHeader {
+ prm.Header = headerObjSDK.ToV2().GetHeader()
+ }
+ }
+
+ err := checker.CheckAPE(context.Background(), prm)
+ if test.expectAPEErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+ })
+ }
+}
+
+func TestAPECheck(t *testing.T) {
+ for _, test := range apeCheckTestCases {
+ t.Run(test.name, func(t *testing.T) {
+ for _, method := range test.methods {
+ t.Run(method, func(t *testing.T) {
+ headerProvider := newHeaderProviderMock()
+ frostfsidProvider := newFrostfsIDProviderMock(t)
+
+ cnr := newContainerIDSDK(t, test.container)
+ obj := newObjectIDSDK(t, test.object)
+
+ ls := inmemory.NewInmemoryLocalStorage()
+ ms := inmemory.NewInmemoryMorphRuleChainStorage()
+
+ if len(test.containerRules) > 0 {
+ ls.AddOverride(chain.Ingress, policyengine.ContainerTarget(test.container), &chain.Chain{
+ Rules: test.containerRules,
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+ }
+
+ if len(test.groupidRules) > 0 {
+ ls.AddOverride(chain.Ingress, policyengine.GroupTarget(":"+groupID), &chain.Chain{
+ Rules: test.groupidRules,
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+ }
+
+ checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil)
+
+ prm := Prm{
+ Method: method,
+ Container: cnr,
+ Object: obj,
+ Role: role,
+ SenderKey: senderKey,
+ }
+
+ var headerObjSDK *objectSDK.Object
+ if test.header.headerObjSDK != nil {
+ headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
+ if test.header.fromHeaderProvider {
+ require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
+ headerProvider.addHeader(cnr, *obj, headerObjSDK)
+ } else if test.header.fromRequestResponseHeader {
+ prm.Header = headerObjSDK.ToV2().GetHeader()
+ }
+ }
+
+ err := checker.CheckAPE(context.Background(), prm)
+ if test.expectAPEErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+ })
+ }
+}
+
+type netmapStub struct {
+ netmaps map[uint64]*netmapSDK.NetMap
+ currentEpoch uint64
+}
+
+func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
+ if diff >= s.currentEpoch {
+ return nil, errors.New("invalid diff")
+ }
+ return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+}
+
+func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
+ if nm, found := s.netmaps[epoch]; found {
+ return nm, nil
+ }
+ return nil, errors.New("netmap not found")
+}
+
+func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
+ return s.currentEpoch, nil
+}
+
+type testContainerSource struct {
+ containers map[cid.ID]*container.Container
+}
+
+func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
+ if cnr, found := s.containers[cnrID]; found {
+ return cnr, nil
+ }
+ return nil, errors.New("container not found")
+}
+
+func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
+ return nil, nil
+}
+
+func TestGetECChunk(t *testing.T) {
+ headerProvider := newHeaderProviderMock()
+ frostfsidProvider := newFrostfsIDProviderMock(t)
+
+ cnr := newContainerIDSDK(t, containerID)
+ obj := newObjectIDSDK(t, &objectID)
+
+ ls := inmemory.NewInmemoryLocalStorage()
+ ms := inmemory.NewInmemoryMorphRuleChainStorage()
+
+ ls.AddOverride(chain.Ingress, policyengine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindResource,
+ Key: "attr1",
+ Value: "value",
+ },
+ },
+ },
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: methodsRequiredOID},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ },
+ },
+ })
+
+ node1Key, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ node1 := netmapSDK.NodeInfo{}
+ node1.SetPublicKey(node1Key.PublicKey().Bytes())
+ node2Key, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ node2 := netmapSDK.NodeInfo{}
+ node2.SetPublicKey(node2Key.PublicKey().Bytes())
+ netmap := &netmapSDK.NetMap{}
+ netmap.SetEpoch(100)
+ netmap.SetNodes([]netmapSDK.NodeInfo{node1, node2})
+
+ nm := &netmapStub{
+ currentEpoch: 100,
+ netmaps: map[uint64]*netmapSDK.NetMap{
+ 99: netmap,
+ 100: netmap,
+ },
+ }
+
+ cont := containerSDK.Container{}
+ cont.Init()
+ pp := netmapSDK.PlacementPolicy{}
+ require.NoError(t, pp.DecodeString("EC 1.1"))
+ cont.SetPlacementPolicy(pp)
+ cs := &testContainerSource{
+ containers: map[cid.ID]*container.Container{
+ cnr: {
+ Value: cont,
+ },
+ },
+ }
+
+ checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, node1Key.PublicKey().Bytes())
+
+ ecParentID := oidtest.ID()
+ chunkHeader := newHeaderObjectSDK(cnr, obj, nil).ToV2().GetHeader()
+ ecHeader := object.ECHeader{
+ Index: 1,
+ Total: 2,
+ Parent: &refs.ObjectID{},
+ }
+ chunkHeader.SetEC(&ecHeader)
+ ecParentID.WriteToV2(ecHeader.Parent)
+
+ parentHeader := newHeaderObjectSDK(cnr, &ecParentID, &headerObjectSDKParams{
+ attributes: []struct {
+ key string
+ val string
+ }{
+ {
+ key: "attr1",
+ val: "value",
+ },
+ },
+ })
+ headerProvider.addHeader(cnr, ecParentID, parentHeader)
+
+ // the container node requests the EC parent header, so access is denied by the rule matching the parent's attribute key/value
+ t.Run("access denied on container node", func(t *testing.T) {
+ prm := Prm{
+ Method: nativeschema.MethodGetObject,
+ Container: cnr,
+ Object: obj,
+ Role: role,
+ SenderKey: hex.EncodeToString(node2Key.PublicKey().Bytes()),
+ Header: chunkHeader,
+ }
+
+ err = checker.CheckAPE(context.Background(), prm)
+ require.Error(t, err)
+ })
+
+ // a non-container node has no access rights to collect the EC parent header, so it uses the EC chunk header
+ t.Run("access allowed on non container node", func(t *testing.T) {
+ otherKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ checker = NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, otherKey.PublicKey().Bytes())
+ prm := Prm{
+ Method: nativeschema.MethodGetObject,
+ Container: cnr,
+ Object: obj,
+ Role: nativeschema.PropertyValueContainerRoleOthers,
+ SenderKey: senderKey,
+ Header: chunkHeader,
+ }
+
+ err = checker.CheckAPE(context.Background(), prm)
+ require.NoError(t, err)
+ })
+}
diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
new file mode 100644
index 0000000000..82e660a7f2
--- /dev/null
+++ b/pkg/services/object/ape/errors.go
@@ -0,0 +1,35 @@
+package ape
+
+import (
+ "errors"
+
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+var (
+ errMissingContainerID = malformedRequestError("missing container ID")
+ errEmptyVerificationHeader = malformedRequestError("empty verification header")
+ errEmptyBodySig = malformedRequestError("empty body signature")
+ errInvalidSessionSig = malformedRequestError("invalid session token signature")
+ errInvalidSessionOwner = malformedRequestError("invalid session token owner")
+ errInvalidVerb = malformedRequestError("session token verb is invalid")
+)
+
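+// malformedRequestError wraps a reason into an InvalidArgument API status error.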
+func malformedRequestError(reason string) error {
+ invalidArgErr := &apistatus.InvalidArgument{}
+ invalidArgErr.SetMessage(reason)
+ return invalidArgErr
+}
+
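+// toStatusErr converts a checker error into an API status error: chain router
+// errors become ObjectAccessDenied, anything else becomes ServerInternal.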
+func toStatusErr(err error) error {
+ var chRouterErr *checkercore.ChainRouterError
+ if !errors.As(err, &chRouterErr) {
+ errServerInternal := &apistatus.ServerInternal{}
+ apistatus.WriteInternalServerErr(errServerInternal, err)
+ return errServerInternal
+ }
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason("ape denied request: " + err.Error())
+ return errAccessDenied
+}
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
new file mode 100644
index 0000000000..102985aa68
--- /dev/null
+++ b/pkg/services/object/ape/metadata.go
@@ -0,0 +1,179 @@
+package ape
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
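+// Metadata groups the request data required to perform an APE check.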
+type Metadata struct {
+ Container cid.ID
+ Object *oid.ID
+ MetaHeader *session.RequestMetaHeader
+ VerificationHeader *session.RequestVerificationHeader
+ SessionToken *sessionSDK.Object
+ BearerToken *bearer.Token
+}
+
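+// RequestOwner resolves the owner and public key of the request: an impersonating
+// bearer token takes priority, then a session token, then the original body signature.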
+func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+ if m.VerificationHeader == nil {
+ return nil, nil, errEmptyVerificationHeader
+ }
+
+ if m.BearerToken != nil && m.BearerToken.Impersonate() {
+ return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
+ }
+
+ // if a session token is present, use it as the source of truth
+ if m.SessionToken != nil {
+ // verify signature of session token
+ return ownerFromToken(m.SessionToken)
+ }
+
+ // otherwise get original body signature
+ bodySignature := originalBodySignature(m.VerificationHeader)
+ if bodySignature == nil {
+ return nil, nil, errEmptyBodySig
+ }
+
+ return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+// RequestInfo contains request information extracted from request metadata.
+type RequestInfo struct {
+ // Role defines under which role this request is executed.
+ // It must be one of the role constants defined in the native schema.
+ Role string
+
+ ContainerOwner user.ID
+
+ ContainerAttributes map[string]string
+
+ // Namespace defines the namespace to which the container belongs.
+ Namespace string
+
+ // HEX-encoded sender key.
+ SenderKey string
+}
+
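+// RequestInfoExtractor extracts RequestInfo from request metadata for the given method.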
+type RequestInfoExtractor interface {
+ GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
+}
+
+type extractor struct {
+ containers container.Source
+
+ nm netmap.Source
+
+ classifier objectCore.SenderClassifier
+}
+
+func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
+ return &extractor{
+ containers: containers,
+ nm: nm,
+ classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
+ }
+}
+
+func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
+ currentEpoch, err := e.nm.Epoch(ctx)
+ if err != nil {
+ return fmt.Errorf("can't fetch current epoch: %w", err)
+ }
+ if sessionToken.ExpiredAt(currentEpoch) {
+ return new(apistatus.SessionTokenExpired)
+ }
+ if sessionToken.InvalidAt(currentEpoch) {
+ return fmt.Errorf("malformed request: token is invalid at epoch %d", currentEpoch)
+ }
+ if !assertVerb(*sessionToken, method) {
+ return errInvalidVerb
+ }
+ return nil
+}
+
+func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
+ cnr, err := e.containers.Get(ctx, m.Container)
+ if err != nil {
+ return ri, err
+ }
+
+ if m.SessionToken != nil {
+ if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
+ return ri, err
+ }
+ }
+
+ ownerID, ownerKey, err := m.RequestOwner()
+ if err != nil {
+ return ri, err
+ }
+ res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
+ if err != nil {
+ return ri, err
+ }
+
+ ri.Role = nativeSchemaRole(res.Role)
+ ri.ContainerOwner = cnr.Value.Owner()
+
+ ri.ContainerAttributes = map[string]string{}
+ for key, val := range cnr.Value.Attributes() {
+ ri.ContainerAttributes[key] = val
+ }
+
+ cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+ if hasNamespace {
+ ri.Namespace = cnrNamespace
+ }
+
+ // the key is assumed to be valid at this point,
+ // otherwise the request would not have passed validation
+ ri.SenderKey = hex.EncodeToString(res.Key)
+
+ return ri, nil
+}
+
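+// readSessionToken decodes a V2 session token and checks its relation to the
+// given container and, unless the session is for removal, to the object.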
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+ var sTok *sessionSDK.Object
+
+ if tokV2 != nil {
+ sTok = new(sessionSDK.Object)
+
+ err := sTok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+ // if the session relates to the object's removal, we don't check
+ // the relation of the tombstone to the session here since the user
+ // can't predict the tombstone's ID.
+ err = assertSessionRelation(*sTok, cnr, nil)
+ } else {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return sTok, nil
+}
diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/ape/metadata_test.go
new file mode 100644
index 0000000000..fd919008f2
--- /dev/null
+++ b/pkg/services/object/ape/metadata_test.go
@@ -0,0 +1,164 @@
+package ape
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/google/uuid"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRequestOwner(t *testing.T) {
+ containerOwner, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ userPk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ var userID user.ID
+ user.IDFromKey(&userID, userPk.PrivateKey.PublicKey)
+
+ var userSignature refs.Signature
+ userSignature.SetKey(userPk.PublicKey().Bytes())
+
+ vh := new(sessionV2.RequestVerificationHeader)
+ vh.SetBodySignature(&userSignature)
+
+ t.Run("empty verification header", func(t *testing.T) {
+ req := Metadata{}
+ checkOwner(t, req, nil, errEmptyVerificationHeader)
+ })
+ t.Run("empty verification header signature", func(t *testing.T) {
+ req := Metadata{
+ VerificationHeader: new(sessionV2.RequestVerificationHeader),
+ }
+ checkOwner(t, req, nil, errEmptyBodySig)
+ })
+ t.Run("no tokens", func(t *testing.T) {
+ req := Metadata{
+ VerificationHeader: vh,
+ }
+ checkOwner(t, req, userPk.PublicKey(), nil)
+ })
+
+ t.Run("bearer without impersonate, no session", func(t *testing.T) {
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, false),
+ }
+ checkOwner(t, req, userPk.PublicKey(), nil)
+ })
+ t.Run("bearer with impersonate, no session", func(t *testing.T) {
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, true),
+ }
+ checkOwner(t, req, containerOwner.PublicKey(), nil)
+ })
+ t.Run("bearer with impersonate, with session", func(t *testing.T) {
+ // To check that bearer token takes priority, use different key to sign session token.
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ req := Metadata{
+ VerificationHeader: vh,
+ BearerToken: newBearer(t, containerOwner, userID, true),
+ SessionToken: newSession(t, pk),
+ }
+ checkOwner(t, req, containerOwner.PublicKey(), nil)
+ })
+ t.Run("with session", func(t *testing.T) {
+ req := Metadata{
+ VerificationHeader: vh,
+ SessionToken: newSession(t, containerOwner),
+ }
+ checkOwner(t, req, containerOwner.PublicKey(), nil)
+ })
+ t.Run("malformed session token", func(t *testing.T) {
+ // This test is tricky: a session token has an issuer field and a signature, which must correspond to each other.
+ // The SDK prevents constructing such a token in the first place, but it is still possible via the API.
+ // Thus, construct a v2 token, convert it to the SDK one and pass it to our function.
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ var user1 user.ID
+ user.IDFromKey(&user1, pk.PrivateKey.PublicKey)
+
+ var id refs.OwnerID
+ id.SetValue(user1.WalletBytes())
+
+ raw, err := uuid.New().MarshalBinary()
+ require.NoError(t, err)
+
+ var cidV2 refs.ContainerID
+ cidtest.ID().WriteToV2(&cidV2)
+
+ sessionCtx := new(sessionV2.ObjectSessionContext)
+ sessionCtx.SetTarget(&cidV2)
+
+ var body sessionV2.TokenBody
+ body.SetOwnerID(&id)
+ body.SetID(raw)
+ body.SetLifetime(new(sessionV2.TokenLifetime))
+ body.SetSessionKey(pk.PublicKey().Bytes())
+ body.SetContext(sessionCtx)
+
+ var tokV2 sessionV2.Token
+ tokV2.SetBody(&body)
+ require.NoError(t, sigutilV2.SignData(&containerOwner.PrivateKey, smWrapper{Token: &tokV2}))
+ require.NoError(t, sigutilV2.VerifyData(smWrapper{Token: &tokV2}))
+
+ var tok sessionSDK.Object
+ require.NoError(t, tok.ReadFromV2(tokV2))
+
+ req := Metadata{
+ VerificationHeader: vh,
+ SessionToken: &tok,
+ }
+ checkOwner(t, req, nil, errInvalidSessionOwner)
+ })
+}
+
+type smWrapper struct {
+ *sessionV2.Token
+}
+
+func (s smWrapper) ReadSignedData(data []byte) ([]byte, error) {
+ return s.Token.GetBody().StableMarshal(data), nil
+}
+
+func (s smWrapper) SignedDataSize() int {
+ return s.Token.GetBody().StableSize()
+}
+
+func newSession(t *testing.T, pk *keys.PrivateKey) *sessionSDK.Object {
+ var tok sessionSDK.Object
+ require.NoError(t, tok.Sign(pk.PrivateKey))
+ return &tok
+}
+
+func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool) *bearer.Token {
+ var tok bearer.Token
+ tok.SetImpersonate(impersonate)
+ tok.ForUser(user)
+ require.NoError(t, tok.Sign(pk.PrivateKey))
+ return &tok
+}
+
+func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
+ _, actual, err := req.RequestOwner()
+ if expectedErr != nil {
+ require.ErrorIs(t, err, expectedErr)
+ return
+ }
+
+ require.NoError(t, err)
+ require.Equal(t, expected, actual)
+}
diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go
new file mode 100644
index 0000000000..39dd7f4763
--- /dev/null
+++ b/pkg/services/object/ape/request.go
@@ -0,0 +1,253 @@
+package ape
+
+import (
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+
+ aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "google.golang.org/grpc/peer"
+)
+
+var defaultRequest = aperequest.Request{}
+
+var errECMissingParentObjectID = errors.New("missing EC parent object ID")
+
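+// nativeSchemaRole maps an ACL role to its native schema property value.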
+func nativeSchemaRole(role acl.Role) string {
+ switch role {
+ case acl.RoleOwner:
+ return nativeschema.PropertyValueContainerRoleOwner
+ case acl.RoleContainer:
+ return nativeschema.PropertyValueContainerRoleContainer
+ case acl.RoleInnerRing:
+ return nativeschema.PropertyValueContainerRoleIR
+ case acl.RoleOthers:
+ return nativeschema.PropertyValueContainerRoleOthers
+ default:
+ return ""
+ }
+}
+
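+// resourceName builds the native schema resource name for a container or an
+// object, qualified by namespace unless it is empty or "root".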
+func resourceName(cid cid.ID, oid *oid.ID, namespace string) string {
+ if namespace == "root" || namespace == "" {
+ if oid != nil {
+ return fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, cid.EncodeToString(), oid.EncodeToString())
+ }
+ return fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cid.EncodeToString())
+ }
+ if oid != nil {
+ return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObject, namespace, cid.EncodeToString(), oid.EncodeToString())
+ }
+ return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
+}
+
+// objectProperties collects object properties from address parameters and a header, if one is passed.
+func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string {
+ objectProps := map[string]string{
+ nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
+ }
+
+ for attrName, attrValue := range cnrAttrs {
+ prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName)
+ objectProps[prop] = attrValue
+ }
+
+ objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString()
+
+ if oid != nil {
+ objectProps[nativeschema.PropertyKeyObjectID] = oid.String()
+ }
+
+ if header == nil {
+ return objectProps
+ }
+
+ objV2 := new(objectV2.Object)
+ objV2.SetHeader(header)
+ objSDK := objectSDK.NewFromV2(objV2)
+
+ objectProps[nativeschema.PropertyKeyObjectVersion] = objSDK.Version().String()
+ objectProps[nativeschema.PropertyKeyObjectOwnerID] = objSDK.OwnerID().EncodeToString()
+ objectProps[nativeschema.PropertyKeyObjectCreationEpoch] = strconv.Itoa(int(objSDK.CreationEpoch()))
+ objectProps[nativeschema.PropertyKeyObjectPayloadLength] = strconv.Itoa(int(objSDK.PayloadSize()))
+ objectProps[nativeschema.PropertyKeyObjectType] = objSDK.Type().String()
+
+ pcs, isSet := objSDK.PayloadChecksum()
+ if isSet {
+ objectProps[nativeschema.PropertyKeyObjectPayloadHash] = pcs.String()
+ }
+ hcs, isSet := objSDK.PayloadHomomorphicHash()
+ if isSet {
+ objectProps[nativeschema.PropertyKeyObjectHomomorphicHash] = hcs.String()
+ }
+
+ for _, attr := range header.GetAttributes() {
+ objectProps[attr.GetKey()] = attr.GetValue()
+ }
+
+ return objectProps
+}
+
+// newAPERequest creates an APE request to be passed to a chain router. It collects resource properties from
+// the header provided by headerProvider. If the header cannot be found there, properties are
+// initialized from the header given in prm (if it is set). Otherwise, only CID and OID are set as properties.
+func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Request, error) {
+ switch prm.Method {
+ case nativeschema.MethodGetObject,
+ nativeschema.MethodHeadObject,
+ nativeschema.MethodRangeObject,
+ nativeschema.MethodHashObject,
+ nativeschema.MethodDeleteObject,
+ nativeschema.MethodPatchObject:
+ if prm.Object == nil {
+ return defaultRequest, fmt.Errorf("method %s: %w", prm.Method, errMissingOID)
+ }
+ case nativeschema.MethodSearchObject, nativeschema.MethodPutObject:
+ default:
+ return defaultRequest, fmt.Errorf("unknown method: %s", prm.Method)
+ }
+
+ var header *objectV2.Header
+ if prm.Header != nil {
+ header = prm.Header
+ } else if prm.Object != nil {
+ headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object, true)
+ if err == nil {
+ header = headerObjSDK.ToV2().GetHeader()
+ }
+ }
+ header, err := c.fillHeaderWithECParent(ctx, prm, header)
+ if err != nil {
+ return defaultRequest, fmt.Errorf("get EC parent header: %w", err)
+ }
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
+ nativeschema.PropertyKeyActorRole: prm.Role,
+ }
+
+ for _, xhead := range prm.XHeaders {
+ xheadKey := fmt.Sprintf(commonschema.PropertyKeyFrostFSXHeader, xhead.GetKey())
+ reqProps[xheadKey] = xhead.GetValue()
+ }
+
+ reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm)
+ if err != nil {
+ return defaultRequest, err
+ }
+
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+
+ return aperequest.NewRequest(
+ prm.Method,
+ aperequest.NewResource(
+ resourceName(prm.Container, prm.Object, prm.Namespace),
+ objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header),
+ ),
+ reqProps,
+ ), nil
+}
+
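+// fillHeaderWithECParent replaces an EC chunk header with its parent header
+// when the current node is a container node; otherwise the chunk header is kept.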
+func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) (*objectV2.Header, error) {
+ if header == nil {
+ return header, nil
+ }
+ if header.GetEC() == nil {
+ return header, nil
+ }
+ parentObjRefID := header.GetEC().Parent
+ if parentObjRefID == nil {
+ return nil, errECMissingParentObjectID
+ }
+ var parentObjID oid.ID
+ if err := parentObjID.ReadFromV2(*parentObjRefID); err != nil {
+ return nil, fmt.Errorf("EC parent object ID format error: %w", err)
+ }
+ // only container nodes have access to collect the parent object
+ contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container)
+ if err != nil {
+ return nil, fmt.Errorf("check container node status: %w", err)
+ }
+ if !contNode {
+ return header, nil
+ }
+ parentObj, err := c.headerProvider.GetHeader(ctx, prm.Container, parentObjID, false)
+ if err != nil {
+ if isLogicalError(err) {
+ return header, nil
+ }
+ return nil, fmt.Errorf("EC parent header request: %w", err)
+ }
+ return parentObj.ToV2().GetHeader(), nil
+}
+
+func isLogicalError(err error) bool {
+ var errObjRemoved *apistatus.ObjectAlreadyRemoved
+ var errObjNotFound *apistatus.ObjectNotFound
+ return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound)
+}
+
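+// currentNodeIsContainerNode reports whether this node's public key belongs to
+// the container nodes of the latest or the previous network map.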
+func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) {
+ cnr, err := c.cnrSource.Get(ctx, cnrID)
+ if err != nil {
+ return false, err
+ }
+
+ nm, err := netmap.GetLatestNetworkMap(ctx, c.nm)
+ if err != nil {
+ return false, err
+ }
+ idCnr := make([]byte, sha256.Size)
+ cnrID.Encode(idCnr)
+
+ in, err := object.LookupKeyInContainer(nm, c.nodePK, idCnr, cnr.Value)
+ if err != nil {
+ return false, err
+ } else if in {
+ return true, nil
+ }
+
+ nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
+ if err != nil {
+ return false, err
+ }
+
+ return object.LookupKeyInContainer(nm, c.nodePK, idCnr, cnr.Value)
+}
+
+// fillWithUserClaimTags fills APE request properties with user claim tags fetched from the frostfsid contract by the actor's public key.
+func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) {
+ if reqProps == nil {
+ reqProps = make(map[string]string)
+ }
+ pk, err := keys.NewPublicKeyFromString(prm.SenderKey)
+ if err != nil {
+ return nil, err
+ }
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk)
+ if err != nil {
+ return reqProps, err
+ }
+ for propertyName, propertyValue := range props {
+ reqProps[propertyName] = propertyValue
+ }
+ return reqProps, nil
+}
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
new file mode 100644
index 0000000000..fcf7c4c40f
--- /dev/null
+++ b/pkg/services/object/ape/request_test.go
@@ -0,0 +1,373 @@
+package ape
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "testing"
+
+ aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/peer"
+)
+
+const (
+ testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y"
+
+ incomingIP = "192.92.33.1"
+
+ testSysAttrName = "unittest"
+
+ testSysAttrZone = "eggplant"
+)
+
+var containerAttrs = map[string]string{
+ cnrV2.SysAttributeName: testSysAttrName,
+ cnrV2.SysAttributeZone: testSysAttrZone,
+}
+
+func ctxWithPeerInfo() context.Context {
+ return peer.NewContext(context.Background(), &peer.Peer{
+ Addr: &net.TCPAddr{
+ IP: net.ParseIP(incomingIP),
+ Port: 41111,
+ },
+ })
+}
+
+func TestObjectProperties(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ container string
+ object *string
+ header *headerObjectSDKParams
+ }{
+ {
+ name: "fully filled header",
+ container: containerID,
+ object: stringPtr(objectID),
+ header: &headerObjectSDKParams{
+ majorVersion: 1,
+ minorVersion: 1,
+ owner: usertest.ID(),
+ epoch: 3,
+ payloadSize: 1000,
+ typ: objectSDK.TypeRegular,
+ payloadChecksum: checksumtest.Checksum(),
+ payloadHomomorphicHash: checksumtest.Checksum(),
+ attributes: []struct {
+ key string
+ val string
+ }{
+ {
+ key: "attr1",
+ val: "val1",
+ },
+ {
+ key: "attr2",
+ val: "val2",
+ },
+ },
+ },
+ },
+ {
+ name: "partially filled header",
+ container: containerID,
+ header: &headerObjectSDKParams{
+ majorVersion: 1,
+ minorVersion: 1,
+ owner: usertest.ID(),
+ epoch: 3,
+ attributes: []struct {
+ key string
+ val string
+ }{
+ {
+ key: "attr1",
+ val: "val1",
+ },
+ },
+ },
+ },
+ {
+ name: "only address parameters set in header",
+ container: containerID,
+ object: stringPtr(objectID),
+ },
+ {
+ name: "only container set in header",
+ container: containerID,
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ cnr := newContainerIDSDK(t, test.container)
+ obj := newObjectIDSDK(t, test.object)
+ header := newHeaderObjectSDK(cnr, obj, test.header)
+
+ var testCnrOwner user.ID
+ require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
+
+ props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader())
+ require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID])
+ require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID])
+
+ if obj != nil {
+ require.Equal(t, *test.object, props[nativeschema.PropertyKeyObjectID])
+ }
+
+ if test.header != nil {
+ require.Equal(t,
+ fmt.Sprintf("v%d.%d", test.header.majorVersion, test.header.minorVersion),
+ props[nativeschema.PropertyKeyObjectVersion],
+ )
+ require.Equal(t, test.header.owner.EncodeToString(), props[nativeschema.PropertyKeyObjectOwnerID])
+ require.Equal(t, fmt.Sprintf("%d", test.header.epoch), props[nativeschema.PropertyKeyObjectCreationEpoch])
+ require.Equal(t, fmt.Sprintf("%d", test.header.payloadSize), props[nativeschema.PropertyKeyObjectPayloadLength])
+ require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType])
+ require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash])
+ require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash])
+ require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)])
+ require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)])
+
+ for _, attr := range test.header.attributes {
+ require.Equal(t, attr.val, props[attr.key])
+ }
+ }
+ })
+ }
+}
+
+func TestNewAPERequest(t *testing.T) {
+ tests := []struct {
+ name string
+ methods []string
+ namespace string
+ container string
+ object *string
+ header testHeader
+ expectErr error
+ }{
+ {
+ name: "oid required requests",
+ methods: methodsRequiredOID,
+ namespace: namespace,
+ container: containerID,
+ object: stringPtr(objectID),
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ majorVersion: 1,
+ minorVersion: 1,
+ owner: usertest.ID(),
+ epoch: 3,
+ payloadSize: 1000,
+ typ: objectSDK.TypeRegular,
+ payloadChecksum: checksumtest.Checksum(),
+ payloadHomomorphicHash: checksumtest.Checksum(),
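+// netmapStub serves preconfigured network maps by epoch.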
+ },
+ fromHeaderProvider: true,
+ },
+ },
+ {
+ name: "oid required requests but header cannot be found locally",
+ methods: methodsRequiredOID,
+ namespace: namespace,
+ container: containerID,
+ object: stringPtr(objectID),
+ header: testHeader{},
+ },
+ {
+ name: "oid required requests missed oid",
+ methods: methodsRequiredOID,
+ namespace: namespace,
+ container: containerID,
+ object: nil,
+ header: testHeader{},
+ expectErr: errMissingOID,
+ },
+ {
+ name: "response for oid required requests",
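+// testContainerSource is an in-memory container source backed by a map.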
+ methods: methodsRequiredOID,
+ namespace: namespace,
+ container: containerID,
+ object: stringPtr(objectID),
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ majorVersion: 1,
+ minorVersion: 1,
+ owner: usertest.ID(),
+ epoch: 3,
+ payloadSize: 1000,
+ typ: objectSDK.TypeRegular,
+ payloadChecksum: checksumtest.Checksum(),
+ payloadHomomorphicHash: checksumtest.Checksum(),
+ },
+ fromRequestResponseHeader: true,
+ },
+ },
+ {
+ name: "oid not required methods request",
+ methods: methodsOptionalOID,
+ namespace: namespace,
+ container: containerID,
+ object: nil,
+ header: testHeader{
+ headerObjSDK: &headerObjectSDKParams{
+ majorVersion: 6,
+ minorVersion: 66,
+ owner: usertest.ID(),
+ epoch: 3,
+ typ: objectSDK.TypeLock,
+ },
+ fromRequestResponseHeader: true,
+ },
+ },
+ {
+ name: "oid not required methods request but no header",
+ methods: methodsOptionalOID,
+ namespace: namespace,
+ container: containerID,
+ object: nil,
+ header: testHeader{},
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ for _, method := range test.methods {
+ t.Run(method, func(t *testing.T) {
+ cnr := newContainerIDSDK(t, test.container)
+ obj := newObjectIDSDK(t, test.object)
+
+ var testCnrOwner user.ID
+ require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
+
+ prm := Prm{
+ Namespace: test.namespace,
+ Method: method,
+ Container: cnr,
+ Object: obj,
+ Role: role,
+ SenderKey: senderKey,
+ ContainerOwner: testCnrOwner,
+ ContainerAttributes: map[string]string{
+ cnrV2.SysAttributeZone: testSysAttrZone,
+ cnrV2.SysAttributeName: testSysAttrName,
+ },
+ }
+
+ headerSource := newHeaderProviderMock()
+ ffidProvider := newFrostfsIDProviderMock(t)
+
+ var headerObjSDK *objectSDK.Object
+ if test.header.headerObjSDK != nil {
+ headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
+ if test.header.fromHeaderProvider {
+ require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
+ headerSource.addHeader(cnr, *obj, headerObjSDK)
+ } else if test.header.fromRequestResponseHeader {
+ prm.Header = headerObjSDK.ToV2().GetHeader()
+ }
+ }
+
+ c := checkerImpl{
+ headerProvider: headerSource,
+ frostFSIDClient: ffidProvider,
+ }
+
+ r, err := c.newAPERequest(ctxWithPeerInfo(), prm)
+ if test.expectErr != nil {
+ require.Error(t, err)
+ require.ErrorIs(t, err, test.expectErr)
+ return
+ }
+
+ expectedRequest := aperequest.NewRequest(
+ method,
+ aperequest.NewResource(
+ resourceName(cnr, obj, prm.Namespace),
+ objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header {
+ if headerObjSDK != nil {
+ return headerObjSDK.ToV2().GetHeader()
+ }
+ return prm.Header
+ }())),
+ map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
+ nativeschema.PropertyKeyActorRole: prm.Role,
+ fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr1"): "value1",
+ fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr2"): "value2",
+ commonschema.PropertyKeyFrostFSIDGroupID: "1",
+ commonschema.PropertyKeyFrostFSSourceIP: incomingIP,
+ },
+ )
+
+ require.Equal(t, expectedRequest, r)
+ })
+ }
+ })
+ }
+}
+
+func TestResourceName(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ namespace string
+ container string
+ object *string
+ expected string
+ }{
+ {
+ name: "non-root namespace, CID",
+ namespace: namespace,
+ container: containerID,
+ expected: fmt.Sprintf("native:object/%s/%s/*", namespace, containerID),
+ },
+ {
+ name: "non-root namespace, CID, OID",
+ namespace: namespace,
+ container: containerID,
+ object: stringPtr(objectID),
+ expected: fmt.Sprintf("native:object/%s/%s/%s", namespace, containerID, objectID),
+ },
+ {
+ name: "empty namespace, CID",
+ namespace: "",
+ container: containerID,
+ expected: fmt.Sprintf("native:object//%s/*", containerID),
+ },
+ {
+ name: "empty namespace, CID, OID",
+ namespace: "",
+ container: containerID,
+ object: stringPtr(objectID),
+ expected: fmt.Sprintf("native:object//%s/%s", containerID, objectID),
+ },
+ {
+ name: "root namespace, CID",
+ namespace: "root",
+ container: containerID,
+ expected: fmt.Sprintf("native:object//%s/*", containerID),
+ },
+ {
+ name: "root namespace, CID, OID",
+ namespace: "root",
+ container: containerID,
+ object: stringPtr(objectID),
+ expected: fmt.Sprintf("native:object//%s/%s", containerID, objectID),
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ cnr := newContainerIDSDK(t, test.container)
+ obj := newObjectIDSDK(t, test.object)
+ require.Equal(t, test.expected, resourceName(cnr, obj, test.namespace))
+ })
+ }
+}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
new file mode 100644
index 0000000000..5e04843f37
--- /dev/null
+++ b/pkg/services/object/ape/service.go
@@ -0,0 +1,480 @@
+package ape
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+)
+
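+// Service is an object service wrapper that checks APE rules before
+// delegating requests to the next service in the chain.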
+type Service struct {
+ apeChecker Checker
+
+ extractor RequestInfoExtractor
+
+ next objectSvc.ServiceServer
+}
+
+var _ objectSvc.ServiceServer = (*Service)(nil)
+
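+// HeaderProvider resolves object headers by container and object ID.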
+type HeaderProvider interface {
+ GetHeader(ctx context.Context, cnr cid.ID, oid oid.ID, local bool) (*objectSDK.Object, error)
+}
+
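+// storageEngineHeaderProvider implements HeaderProvider: local lookups go
+// straight to the storage engine, non-local ones through the get service.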
+type storageEngineHeaderProvider struct {
+ storageEngine *engine.StorageEngine
+ getSvc *getsvc.Service
+}
+
+func (p storageEngineHeaderProvider) GetHeader(ctx context.Context, cnr cid.ID, objID oid.ID, local bool) (*objectSDK.Object, error) {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(objID)
+ if local {
+ return engine.Head(ctx, p.storageEngine, addr)
+ }
+ w := getsvc.NewSimpleObjectWriter()
+ var headPrm getsvc.HeadPrm
+ headPrm.WithAddress(addr)
+ headPrm.SetHeaderWriter(w)
+ headPrm.SetCommonParameters(&util.CommonPrm{}) // default values are ok
+ if err := p.getSvc.Head(ctx, headPrm); err != nil {
+ return nil, err
+ }
+ return w.Object(), nil
+}
+
+func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) HeaderProvider {
+ return storageEngineHeaderProvider{
+ storageEngine: e,
+ getSvc: s,
+ }
+}
+
+func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
+ return &Service{
+ apeChecker: apeChecker,
+ extractor: extractor,
+ next: next,
+ }
+}
+
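+// getStreamBasicChecker checks APE rules against the object header carried
+// in the first (init) part of a Get response stream.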
+type getStreamBasicChecker struct {
+ objectSvc.GetObjectStream
+
+ apeChecker Checker
+
+ metadata Metadata
+
+ reqInfo RequestInfo
+}
+
+func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
+ if partInit, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
+ if err != nil {
+ return toStatusErr(err)
+ }
+
+ prm := Prm{
+ Namespace: g.reqInfo.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodGetObject,
+ SenderKey: g.reqInfo.SenderKey,
+ ContainerOwner: g.reqInfo.ContainerOwner,
+ ContainerAttributes: g.reqInfo.ContainerAttributes,
+ Role: g.reqInfo.Role,
+ BearerToken: g.metadata.BearerToken,
+ XHeaders: resp.GetMetaHeader().GetXHeaders(),
+ }
+
+ if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil {
+ return toStatusErr(err)
+ }
+ }
+ return g.GetObjectStream.Send(resp)
+}
+
+func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ if err != nil {
+ return err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
+ if err != nil {
+ return err
+ }
+ return c.next.Get(request, &getStreamBasicChecker{
+ GetObjectStream: stream,
+ apeChecker: c.apeChecker,
+ metadata: md,
+ reqInfo: reqInfo,
+ })
+}
+
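+// putStreamBasicChecker checks APE rules against the header carried in the
+// first (init) part of a Put request stream.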
+type putStreamBasicChecker struct {
+ apeChecker Checker
+
+ extractor RequestInfoExtractor
+
+ next objectSvc.PutObjectStream
+}
+
+func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
+ if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
+ md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
+ if err != nil {
+ return err
+ }
+ reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
+ if err != nil {
+ return err
+ }
+
+ prm := Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: partInit.GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ Role: reqInfo.Role,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ }
+
+ if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
+ return toStatusErr(err)
+ }
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
+ streamer, err := c.next.Put(ctx)
+
+ return &putStreamBasicChecker{
+ apeChecker: c.apeChecker,
+ extractor: c.extractor,
+ next: streamer,
+ }, err
+}
+
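+// patchStreamBasicChecker checks APE rules on the first message of a Patch
+// request stream.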
+type patchStreamBasicChecker struct {
+ apeChecker Checker
+
+ extractor RequestInfoExtractor
+
+ next objectSvc.PatchObjectStream
+
+ nonFirstSend bool
+}
+
+func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+ if !p.nonFirstSend {
+ p.nonFirstSend = true
+
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ if err != nil {
+ return err
+ }
+ reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
+ if err != nil {
+ return err
+ }
+
+ prm := Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodPatchObject,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ Role: reqInfo.Role,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ }
+
+ if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
+ return toStatusErr(err)
+ }
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
+ streamer, err := c.next.Patch(ctx)
+
+ return &patchStreamBasicChecker{
+ apeChecker: c.apeChecker,
+ extractor: c.extractor,
+ next: streamer,
+ }, err
+}
+
+func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ if err != nil {
+ return nil, err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.next.Head(ctx, request)
+ if err != nil {
+ return nil, err
+ }
+
+ header := new(objectV2.Header)
+ switch headerPart := resp.GetBody().GetHeaderPart().(type) {
+ case *objectV2.ShortHeader:
+ cidV2 := new(refs.ContainerID)
+ md.Container.WriteToV2(cidV2)
+ header.SetContainerID(cidV2)
+ header.SetVersion(headerPart.GetVersion())
+ header.SetCreationEpoch(headerPart.GetCreationEpoch())
+ header.SetOwnerID(headerPart.GetOwnerID())
+ header.SetObjectType(headerPart.GetObjectType())
+ header.SetHomomorphicHash(headerPart.GetHomomorphicHash())
+ header.SetPayloadLength(headerPart.GetPayloadLength())
+ header.SetPayloadHash(headerPart.GetPayloadHash())
+ case *objectV2.HeaderWithSignature:
+ header = headerPart.GetHeader()
+ default:
+ return resp, nil
+ }
+
+ err = c.apeChecker.CheckAPE(ctx, Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: header,
+ Method: nativeschema.MethodHeadObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ })
+ if err != nil {
+ return nil, toStatusErr(err)
+ }
+ return resp, nil
+}
+
+func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
+ md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
+ if err != nil {
+ return err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
+ if err != nil {
+ return err
+ }
+
+ err = c.apeChecker.CheckAPE(stream.Context(), Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Method: nativeschema.MethodSearchObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ })
+ if err != nil {
+ return toStatusErr(err)
+ }
+
+ return c.next.Search(request, stream)
+}
+
+func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ if err != nil {
+ return nil, err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.apeChecker.CheckAPE(ctx, Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodDeleteObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ })
+ if err != nil {
+ return nil, toStatusErr(err)
+ }
+
+ resp, err := c.next.Delete(ctx, request)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ if err != nil {
+ return err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
+ if err != nil {
+ return err
+ }
+
+ err = c.apeChecker.CheckAPE(stream.Context(), Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodRangeObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ })
+ if err != nil {
+ return toStatusErr(err)
+ }
+
+ return c.next.GetRange(request, stream)
+}
+
+func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
+ md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ if err != nil {
+ return nil, err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
+ if err != nil {
+ return nil, err
+ }
+
+ prm := Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Method: nativeschema.MethodHashObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ }
+
+ resp, err := c.next.GetRangeHash(ctx, request)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
+ return nil, toStatusErr(err)
+ }
+ return resp, nil
+}
+
+func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
+ md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
+ if err != nil {
+ return nil, err
+ }
+ reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
+ if err != nil {
+ return nil, err
+ }
+
+ prm := Prm{
+ Namespace: reqInfo.Namespace,
+ Container: md.Container,
+ Object: md.Object,
+ Header: request.GetBody().GetObject().GetHeader(),
+ Method: nativeschema.MethodPutObject,
+ Role: reqInfo.Role,
+ SenderKey: reqInfo.SenderKey,
+ ContainerOwner: reqInfo.ContainerOwner,
+ ContainerAttributes: reqInfo.ContainerAttributes,
+ BearerToken: md.BearerToken,
+ XHeaders: md.MetaHeader.GetXHeaders(),
+ }
+
+ if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
+ return nil, toStatusErr(err)
+ }
+
+ return c.next.PutSingle(ctx, request)
+}
+
+type request interface {
+ GetMetaHeader() *session.RequestMetaHeader
+ GetVerificationHeader() *session.RequestVerificationHeader
+}
+
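+// newMetadata builds Metadata from the original (innermost) request meta
+// header, decoding the address, session token and bearer token.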
+func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
+ meta := request.GetMetaHeader()
+ for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
+ meta = origin
+ }
+
+ cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
+ if err != nil {
+ return
+ }
+ session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
+ if err != nil {
+ return
+ }
+ bearer, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return
+ }
+
+ md = Metadata{
+ Container: cnrID,
+ Object: objID,
+ VerificationHeader: request.GetVerificationHeader(),
+ SessionToken: session,
+ BearerToken: bearer,
+ }
+ return
+}
diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go
new file mode 100644
index 0000000000..97dbfa658a
--- /dev/null
+++ b/pkg/services/object/ape/types.go
@@ -0,0 +1,17 @@
+package ape
+
+import "context"
+
+// Checker provides methods to check requests and responses
+// against the access policy engine.
+type Checker interface {
+ CheckAPE(context.Context, Prm) error
+}
+
+// InnerRingFetcher is an interface that must provide
+// Inner Ring information.
+type InnerRingFetcher interface {
+ // InnerRingKeys must return list of public keys of
+ // the actual inner ring.
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
+}
diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/ape/util.go
similarity index 55%
rename from pkg/services/object/acl/v2/util.go
rename to pkg/services/object/ape/util.go
index 5e3be6e52b..5cd2caa503 100644
--- a/pkg/services/object/acl/v2/util.go
+++ b/pkg/services/object/ape/util.go
@@ -1,4 +1,4 @@
-package v2
+package ape
import (
"crypto/ecdsa"
@@ -6,53 +6,34 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-var errMissingContainerID = errors.New("missing container ID")
-
-func getContainerIDFromRequest(req any) (cid.ID, error) {
- var idV2 *refsV2.ContainerID
- var id cid.ID
-
- switch v := req.(type) {
- case *objectV2.GetRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.PutRequest:
- part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
- if !ok {
- return cid.ID{}, errors.New("can't get container ID in chunk")
+func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
+ if cidV2 != nil {
+ if err = cnrID.ReadFromV2(*cidV2); err != nil {
+ return
}
-
- idV2 = part.GetHeader().GetContainerID()
- case *objectV2.HeadRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.SearchRequest:
- idV2 = v.GetBody().GetContainerID()
- case *objectV2.DeleteRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.GetRangeRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- case *objectV2.GetRangeHashRequest:
- idV2 = v.GetBody().GetAddress().GetContainerID()
- default:
- return cid.ID{}, errors.New("unknown request type")
+ } else {
+ err = errMissingContainerID
+ return
}
- if idV2 == nil {
- return cid.ID{}, errMissingContainerID
+ if objV2 != nil {
+ objID = new(oid.ID)
+ if err = objID.ReadFromV2(*objV2); err != nil {
+ return
+ }
}
-
- return id, id.ReadFromV2(*idV2)
+ return
}
// originalBearerToken goes down to original request meta header and fetches
@@ -71,46 +52,6 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
return &tok, tok.ReadFromV2(*tokV2)
}
-// originalSessionToken goes down to original request meta header and fetches
-// session token from there.
-func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
- for header.GetOrigin() != nil {
- header = header.GetOrigin()
- }
-
- tokV2 := header.GetSessionToken()
- if tokV2 == nil {
- return nil, nil
- }
-
- var tok sessionSDK.Object
-
- err := tok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- return &tok, nil
-}
-
-// getObjectIDFromRequestBody decodes oid.ID from the common interface of the
-// object reference's holders. Returns an error if object ID is missing in the request.
-func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
- idV2 := body.GetAddress().GetObjectID()
- if idV2 == nil {
- return nil, errors.New("missing object ID")
- }
-
- var id oid.ID
-
- err := id.ReadFromV2(*idV2)
- if err != nil {
- return nil, err
- }
-
- return &id, nil
-}
-
func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
// 1. First check signature of session token.
if !token.VerifySignature() {
@@ -118,7 +59,7 @@ func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error)
}
// 2. Then check if session token owner issued the session token
- // TODO(@cthulhu-rider): #1387 implement and use another approach to avoid conversion
+ // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
var tokV2 sessionV2.Token
token.WriteToV2(&tokV2)
@@ -164,31 +105,33 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-// assertVerb checks that token verb corresponds to op.
-func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
- //nolint:exhaustive
- switch op {
- case acl.OpObjectPut:
- return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete)
- case acl.OpObjectDelete:
+// assertVerb checks that token verb corresponds to the method.
+func assertVerb(tok sessionSDK.Object, method string) bool {
+ switch method {
+ case nativeschema.MethodPutObject:
+ return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
+ case nativeschema.MethodDeleteObject:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
- case acl.OpObjectGet:
+ case nativeschema.MethodGetObject:
return tok.AssertVerb(sessionSDK.VerbObjectGet)
- case acl.OpObjectHead:
+ case nativeschema.MethodHeadObject:
return tok.AssertVerb(
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
sessionSDK.VerbObjectDelete,
sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash)
- case acl.OpObjectSearch:
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectPatch,
+ )
+ case nativeschema.MethodSearchObject:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
- case acl.OpObjectRange:
- return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash)
- case acl.OpObjectHash:
+ case nativeschema.MethodRangeObject:
+ return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
+ case nativeschema.MethodHashObject:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
+ case nativeschema.MethodPatchObject:
+ return tok.AssertVerb(sessionSDK.VerbObjectPatch)
}
-
return false
}
@@ -212,3 +155,15 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error
return nil
}
+
+func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
+ key, err := unmarshalPublicKey(rawKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var idSender user.ID
+ user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
+
+ return &idSender, key, nil
+}
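+
+// Usage sketch (illustrative; rawKey is assumed to come from a request
+// signature): the helper returns both the parsed key and the user ID
+// derived from it, so a caller can match the sender against an owner:
+//
+//	sender, pub, err := unmarshalPublicKeyWithOwner(rawKey)
+//	_ = pub // compare *sender with the expected owner on success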
diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go
new file mode 100644
index 0000000000..916bce4271
--- /dev/null
+++ b/pkg/services/object/ape/util_test.go
@@ -0,0 +1,84 @@
+package ape
+
+import (
+ "slices"
+ "testing"
+
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsVerbCompatible(t *testing.T) {
+ table := map[string][]sessionSDK.ObjectVerb{
+ nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
+ nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
+ nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet},
+ nativeschema.MethodHeadObject: {
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectPatch,
+ },
+ nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
+ nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash},
+ nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
+ nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch},
+ }
+
+ verbs := []sessionSDK.ObjectVerb{
+ sessionSDK.VerbObjectPut,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectSearch,
+ sessionSDK.VerbObjectPatch,
+ }
+
+ var tok sessionSDK.Object
+
+ for op, list := range table {
+ for _, verb := range verbs {
+ contains := slices.Contains(list, verb)
+
+ tok.ForVerb(verb)
+
+ require.Equal(t, contains, assertVerb(tok, op),
+ "%v in token, %s executing", verb, op)
+ }
+ }
+}
+
+func TestAssertSessionRelation(t *testing.T) {
+ var tok sessionSDK.Object
+ cnr := cidtest.ID()
+ cnrOther := cidtest.ID()
+ obj := oidtest.ID()
+ objOther := oidtest.ID()
+
+ // make sure the IDs differ; otherwise the test won't work correctly
+ require.False(t, cnrOther.Equals(cnr))
+ require.False(t, objOther.Equals(obj))
+
+ // bind session to the container (required)
+ tok.BindContainer(cnr)
+
+ // test container-global session
+ require.NoError(t, assertSessionRelation(tok, cnr, nil))
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnrOther, nil))
+ require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
+
+ // limit the session to the particular object
+ tok.LimitByObjects(obj)
+
+ // test fixed object session (here obj arg must be non-nil everywhere)
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnr, &objOther))
+}
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
new file mode 100644
index 0000000000..f8ee089fe2
--- /dev/null
+++ b/pkg/services/object/audit.go
@@ -0,0 +1,233 @@
+package object
+
+import (
+ "context"
+ "errors"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+var _ ServiceServer = (*auditService)(nil)
+
+type auditService struct {
+ next ServiceServer
+ log *logger.Logger
+ enabled *atomic.Bool
+}
+
+func NewAuditService(next ServiceServer, log *logger.Logger, enabled *atomic.Bool) ServiceServer {
+ return &auditService{
+ next: next,
+ log: log,
+ enabled: enabled,
+ }
+}
+
+// Delete implements ServiceServer.
+func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
+ res, err := a.next.Delete(ctx, req)
+ if !a.enabled.Load() {
+ return res, err
+ }
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
+ return res, err
+}
+
+// Get implements ServiceServer.
+func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error {
+ err := a.next.Get(req, stream)
+ if !a.enabled.Load() {
+ return err
+ }
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
+ return err
+}
+
+// GetRange implements ServiceServer.
+func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error {
+ err := a.next.GetRange(req, stream)
+ if !a.enabled.Load() {
+ return err
+ }
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
+ return err
+}
+
+// GetRangeHash implements ServiceServer.
+func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
+ resp, err := a.next.GetRangeHash(ctx, req)
+ if !a.enabled.Load() {
+ return resp, err
+ }
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
+ return resp, err
+}
+
+// Head implements ServiceServer.
+func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
+ resp, err := a.next.Head(ctx, req)
+ if !a.enabled.Load() {
+ return resp, err
+ }
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
+ return resp, err
+}
+
+// Put implements ServiceServer.
+func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) {
+ res, err := a.next.Put(ctx)
+ if !a.enabled.Load() {
+ return res, err
+ }
+ if err != nil {
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
+ return res, err
+ }
+ return &auditPutStream{
+ stream: res,
+ log: a.log,
+ }, nil
+}
+
+// PutSingle implements ServiceServer.
+func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ resp, err := a.next.PutSingle(ctx, req)
+ if !a.enabled.Load() {
+ return resp, err
+ }
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
+ audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(),
+ req.GetBody().GetObject().GetObjectID()),
+ err == nil)
+ return resp, err
+}
+
+// Search implements ServiceServer.
+func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) error {
+ err := a.next.Search(req, stream)
+ if !a.enabled.Load() {
+ return err
+ }
+ audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
+ audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
+ return err
+}
+
+var _ PutObjectStream = (*auditPutStream)(nil)
+
+type auditPutStream struct {
+ stream PutObjectStream
+ log *logger.Logger
+
+ failed bool
+ key []byte
+ containerID *refs.ContainerID
+ objectID *refs.ObjectID
+}
+
+// CloseAndRecv implements PutObjectStream.
+func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ resp, err := a.stream.CloseAndRecv(ctx)
+ if err != nil {
+ a.failed = true
+ }
+ a.objectID = resp.GetBody().GetObjectID()
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
+ !a.failed)
+ return resp, err
+}
+
+// Send implements PutObjectStream.
+func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error {
+ if partInit, ok := req.GetBody().GetObjectPart().(*object.PutObjectPartInit); ok {
+ a.containerID = partInit.GetHeader().GetContainerID()
+ a.objectID = partInit.GetObjectID()
+ a.key = req.GetVerificationHeader().GetBodySignature().GetKey()
+ }
+
+ err := a.stream.Send(ctx, req)
+ if err != nil {
+ a.failed = true
+ }
+ if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+ audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
+ !a.failed)
+ }
+ return err
+}
+
+type auditPatchStream struct {
+ stream PatchObjectStream
+ log *logger.Logger
+
+ failed bool
+ key []byte
+ containerID *refs.ContainerID
+ objectID *refs.ObjectID
+
+ nonFirstSend bool
+}
+
+func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ res, err := a.next.Patch(ctx)
+ if !a.enabled.Load() {
+ return res, err
+ }
+ if err != nil {
+ audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
+ return res, err
+ }
+ return &auditPatchStream{
+ stream: res,
+ log: a.log,
+ }, nil
+}
+
+// CloseAndRecv implements PatchObjectStream.
+func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
+ resp, err := a.stream.CloseAndRecv(ctx)
+ if err != nil {
+ a.failed = true
+ }
+ a.objectID = resp.GetBody().GetObjectID()
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
+ !a.failed)
+ return resp, err
+}
+
+// Send implements PatchObjectStream.
+func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) error {
+ if !a.nonFirstSend {
+ a.containerID = req.GetBody().GetAddress().GetContainerID()
+ a.objectID = req.GetBody().GetAddress().GetObjectID()
+ a.key = req.GetVerificationHeader().GetBodySignature().GetKey()
+ a.nonFirstSend = true
+ }
+
+ err := a.stream.Send(ctx, req)
+ if err != nil {
+ a.failed = true
+ }
+ if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
+ !a.failed)
+ }
+ return err
+}
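+
+// Wiring sketch (illustrative only, not part of the service construction in
+// this file): the audit decorator wraps the next handler in the chain and is
+// toggled at runtime through the shared atomic flag:
+//
+//	var enabled atomic.Bool
+//	enabled.Store(true)
+//	srv := NewAuditService(next, log, &enabled)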
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index e797f1a64d..ef65e78bc6 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -3,7 +3,7 @@ package object
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
@@ -32,11 +32,9 @@ func (x *Common) Init(state NodeState, nextHandler ServiceServer) {
x.nextHandler = nextHandler
}
-var errMaintenance apistatus.NodeUnderMaintenance
-
func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
if x.state.IsMaintenance() {
- return errMaintenance
+ return new(apistatus.NodeUnderMaintenance)
}
return x.nextHandler.Get(req, stream)
@@ -44,15 +42,23 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
if x.state.IsMaintenance() {
- return nil, errMaintenance
+ return nil, new(apistatus.NodeUnderMaintenance)
}
return x.nextHandler.Put(ctx)
}
+func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) {
+ if x.state.IsMaintenance() {
+ return nil, new(apistatus.NodeUnderMaintenance)
+ }
+
+ return x.nextHandler.Patch(ctx)
+}
+
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
if x.state.IsMaintenance() {
- return nil, errMaintenance
+ return nil, new(apistatus.NodeUnderMaintenance)
}
return x.nextHandler.Head(ctx, req)
@@ -60,7 +66,7 @@ func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2
func (x *Common) Search(req *objectV2.SearchRequest, stream SearchStream) error {
if x.state.IsMaintenance() {
- return errMaintenance
+ return new(apistatus.NodeUnderMaintenance)
}
return x.nextHandler.Search(req, stream)
@@ -68,7 +74,7 @@ func (x *Common) Search(req *objectV2.SearchRequest, stream SearchStream) error
func (x *Common) Delete(ctx context.Context, req *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
if x.state.IsMaintenance() {
- return nil, errMaintenance
+ return nil, new(apistatus.NodeUnderMaintenance)
}
return x.nextHandler.Delete(ctx, req)
@@ -76,7 +82,7 @@ func (x *Common) Delete(ctx context.Context, req *objectV2.DeleteRequest) (*obje
func (x *Common) GetRange(req *objectV2.GetRangeRequest, stream GetObjectRangeStream) error {
if x.state.IsMaintenance() {
- return errMaintenance
+ return new(apistatus.NodeUnderMaintenance)
}
return x.nextHandler.GetRange(req, stream)
@@ -84,8 +90,16 @@ func (x *Common) GetRange(req *objectV2.GetRangeRequest, stream GetObjectRangeSt
func (x *Common) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
if x.state.IsMaintenance() {
- return nil, errMaintenance
+ return nil, new(apistatus.NodeUnderMaintenance)
}
return x.nextHandler.GetRangeHash(ctx, req)
}
+
+func (x *Common) PutSingle(ctx context.Context, req *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
+ if x.state.IsMaintenance() {
+ return nil, new(apistatus.NodeUnderMaintenance)
+ }
+
+ return x.nextHandler.PutSingle(ctx, req)
+}
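+
+// Usage sketch (illustrative; nodeState and next are assumed to exist):
+// Common is initialized once and then short-circuits every request with
+// NodeUnderMaintenance while the node state reports maintenance mode:
+//
+//	var c Common
+//	c.Init(nodeState, next)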
diff --git a/pkg/services/object/common/target/builder.go b/pkg/services/object/common/target/builder.go
new file mode 100644
index 0000000000..ea68365a75
--- /dev/null
+++ b/pkg/services/object/common/target/builder.go
@@ -0,0 +1,54 @@
+package target
+
+import (
+ "context"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+)
+
+var _ transformer.ChunkedObjectWriter = (*inMemoryObjectBuilder)(nil)
+
+type inMemoryObjectBuilder struct {
+ objectWriter transformer.ObjectWriter
+ payload *payload
+
+ obj *objectSDK.Object
+}
+
+func newInMemoryObjectBuilder(objectWriter transformer.ObjectWriter) *inMemoryObjectBuilder {
+ return &inMemoryObjectBuilder{
+ objectWriter: objectWriter,
+ payload: getPayload(),
+ }
+}
+
+func (b *inMemoryObjectBuilder) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
+ defer func() {
+ putPayload(b.payload)
+ b.payload = nil
+ }()
+
+ b.obj.SetPayload(b.payload.Data)
+
+ if err := b.objectWriter.WriteObject(ctx, b.obj); err != nil {
+ return nil, err
+ }
+
+ id, _ := b.obj.ID()
+ return &transformer.AccessIdentifiers{
+ SelfID: id,
+ }, nil
+}
+
+func (b *inMemoryObjectBuilder) Write(_ context.Context, p []byte) (int, error) {
+ b.payload.Data = append(b.payload.Data, p...)
+
+ return len(p), nil
+}
+
+func (b *inMemoryObjectBuilder) WriteHeader(_ context.Context, obj *objectSDK.Object) error {
+ b.obj = obj
+
+ return nil
+}
diff --git a/pkg/services/object/common/target/pool.go b/pkg/services/object/common/target/pool.go
new file mode 100644
index 0000000000..71da305adb
--- /dev/null
+++ b/pkg/services/object/common/target/pool.go
@@ -0,0 +1,30 @@
+package target
+
+import (
+ "sync"
+)
+
+const (
+ defaultAllocSize = 1024
+ poolSliceMaxSize = 128 * 1024
+)
+
+type payload struct {
+ Data []byte
+}
+
+var putBytesPool = &sync.Pool{
+ New: func() any { return &payload{Data: make([]byte, 0, defaultAllocSize)} },
+}
+
+func getPayload() *payload {
+ return putBytesPool.Get().(*payload)
+}
+
+func putPayload(p *payload) {
+ if cap(p.Data) > poolSliceMaxSize {
+ return
+ }
+ p.Data = p.Data[:0]
+ putBytesPool.Put(p)
+}
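+
+// Usage sketch (illustrative; chunk is an assumed input slice): callers pair
+// getPayload with putPayload, typically via defer, so small buffers are
+// reused and buffers grown past poolSliceMaxSize are dropped from the pool:
+//
+//	p := getPayload()
+//	defer putPayload(p)
+//	p.Data = append(p.Data, chunk...)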
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
new file mode 100644
index 0000000000..f2bd907dbe
--- /dev/null
+++ b/pkg/services/object/common/target/target.go
@@ -0,0 +1,168 @@
+package target
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ // prepare needed put parameters
+ if err := preparePrm(ctx, &prm); err != nil {
+ return nil, fmt.Errorf("could not prepare put parameters: %w", err)
+ }
+
+ if prm.Header.Signature() != nil {
+ return newUntrustedTarget(ctx, &prm)
+ }
+ return newTrustedTarget(ctx, &prm)
+}
+
+func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
+ if maxPayloadSz == 0 {
+ return nil, errors.New("could not obtain max object size parameter")
+ }
+
+ if prm.SignRequestPrivateKey == nil {
+ nodeKey, err := prm.Config.KeyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+ prm.SignRequestPrivateKey = nodeKey
+ }
+
+ // prepare untrusted-Put object target
+ return &validatingPreparedTarget{
+ nextTarget: newInMemoryObjectBuilder(objectwriter.New(prm)),
+ fmt: prm.Config.FormatValidator,
+
+ maxPayloadSz: maxPayloadSz,
+ }, nil
+}
+
+func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ prm.Relay = nil // do not relay request without signature
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
+ if maxPayloadSz == 0 {
+ return nil, errors.New("could not obtain max object size parameter")
+ }
+
+ sToken := prm.Common.SessionToken()
+
+ // prepare trusted-Put object target
+
+ // get private token from local storage
+ var sessionInfo *util.SessionInfo
+
+ if sToken != nil {
+ sessionInfo = &util.SessionInfo{
+ ID: sToken.ID(),
+ Owner: sToken.Issuer(),
+ }
+ }
+
+ key, err := prm.Config.KeyStorage.GetKey(sessionInfo)
+ if err != nil {
+ return nil, fmt.Errorf("could not receive session key: %w", err)
+ }
+
+ // If the session token is missing, the call above returns the default key.
+ // If that key is not the owner's key, replication attempts will fail,
+ // hence this check.
+ ownerObj := prm.Header.OwnerID()
+ if ownerObj.IsEmpty() {
+ return nil, errors.New("missing object owner")
+ }
+
+ if sToken == nil {
+ var ownerSession user.ID
+ user.IDFromKey(&ownerSession, key.PublicKey)
+
+ if !ownerObj.Equals(ownerSession) {
+ return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
+ }
+ } else if !ownerObj.Equals(sessionInfo.Owner) {
+ return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
+ }
+
+ if prm.SignRequestPrivateKey == nil {
+ prm.SignRequestPrivateKey = key
+ }
+
+ return &validatingTarget{
+ fmt: prm.Config.FormatValidator,
+ nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: key,
+ NextTargetInit: func() transformer.ObjectWriter { return objectwriter.New(prm) },
+ NetworkState: prm.Config.NetworkState,
+ MaxSize: maxPayloadSz,
+ WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.Container),
+ SessionToken: sToken,
+ }),
+ }, nil
+}
+
+func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
+ var err error
+
+ // get latest network map
+ nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource)
+ if err != nil {
+ return fmt.Errorf("could not get latest network map: %w", err)
+ }
+
+ idCnr, ok := prm.Header.ContainerID()
+ if !ok {
+ return errors.New("missing container ID")
+ }
+
+ // get container to store the object
+ cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr)
+ if err != nil {
+ return fmt.Errorf("could not get container by ID: %w", err)
+ }
+
+ prm.Container = cnrInfo.Value
+
+ // add common options
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set processing container
+ placement.ForContainer(prm.Container),
+ )
+
+ if ech := prm.Header.ECHeader(); ech != nil {
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set identifier of the processing object
+ placement.ForObject(ech.Parent()),
+ )
+ } else if id, ok := prm.Header.ID(); ok {
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set identifier of the processing object
+ placement.ForObject(id),
+ )
+ }
+
+ // create placement builder from network map
+ builder := placement.NewNetworkMapBuilder(nm)
+
+ if prm.Common.LocalOnly() {
+ // restrict success count to 1 stored copy (to local storage)
+ prm.TraverseOpts = append(prm.TraverseOpts, placement.SuccessAfter(1))
+
+ // use local-only placement builder
+ builder = util.NewLocalPlacement(builder, prm.Config.NetmapKeys)
+ }
+
+ // set placement builder
+ prm.TraverseOpts = append(prm.TraverseOpts, placement.UseBuilder(builder))
+
+ return nil
+}
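+
+// Usage sketch (illustrative; prm is assumed to be a filled
+// objectwriter.Params): callers obtain a chunked writer and stream the
+// object through it:
+//
+//	w, err := New(ctx, prm)
+//	if err != nil { return err }
+//	if err := w.WriteHeader(ctx, header); err != nil { return err }
+//	if _, err := w.Write(ctx, chunk); err != nil { return err }
+//	ids, err := w.Close(ctx)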
diff --git a/pkg/services/object/common/target/validation.go b/pkg/services/object/common/target/validation.go
new file mode 100644
index 0000000000..b29721d01e
--- /dev/null
+++ b/pkg/services/object/common/target/validation.go
@@ -0,0 +1,145 @@
+package target
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "hash"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+)
+
+// validatingTarget validates unprepared object format and content (streaming PUT case).
+type validatingTarget struct {
+ nextTarget transformer.ChunkedObjectWriter
+
+ fmt *object.FormatValidator
+}
+
+// validatingPreparedTarget validates prepared object format and content.
+type validatingPreparedTarget struct {
+ nextTarget transformer.ChunkedObjectWriter
+
+ fmt *object.FormatValidator
+
+ hash hash.Hash
+
+ checksum []byte
+
+ maxPayloadSz uint64 // network config
+
+ payloadSz uint64 // payload size of the streaming object from header
+
+ writtenPayload uint64 // number of already written payload bytes
+}
+
+var (
+ // ErrExceedingMaxSize is returned when payload size is greater than the limit.
+ ErrExceedingMaxSize = errors.New("payload size is greater than the limit")
+ // ErrWrongPayloadSize is returned when chunk payload size is greater than the length declared in header.
+ ErrWrongPayloadSize = errors.New("wrong payload size")
+)
+
+func (t *validatingTarget) WriteHeader(ctx context.Context, obj *objectSDK.Object) error {
+ if err := t.fmt.Validate(ctx, obj, true); err != nil {
+ return fmt.Errorf("(%T) could not validate object format: %w", t, err)
+ }
+
+ return t.nextTarget.WriteHeader(ctx, obj)
+}
+
+func (t *validatingTarget) Write(ctx context.Context, p []byte) (n int, err error) {
+ return t.nextTarget.Write(ctx, p)
+}
+
+func (t *validatingTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
+ return t.nextTarget.Close(ctx)
+}
+
+func (t *validatingPreparedTarget) WriteHeader(ctx context.Context, obj *objectSDK.Object) error {
+ t.payloadSz = obj.PayloadSize()
+ chunkLn := uint64(len(obj.Payload()))
+
+ // check chunk size
+ if chunkLn > t.payloadSz {
+ return ErrWrongPayloadSize
+ }
+
+ // check payload size limit
+ if t.payloadSz > t.maxPayloadSz {
+ return ErrExceedingMaxSize
+ }
+
+ cs, csSet := obj.PayloadChecksum()
+ if !csSet {
+ return errors.New("missing payload checksum")
+ }
+
+ switch typ := cs.Type(); typ {
+ default:
+ return fmt.Errorf("(%T) unsupported payload checksum type %v", t, typ)
+ case checksum.SHA256:
+ t.hash = sha256.New()
+ case checksum.TZ:
+ t.hash = tz.New()
+ }
+
+ t.checksum = cs.Value()
+
+ if err := t.fmt.Validate(ctx, obj, false); err != nil {
+ return fmt.Errorf("(%T) could not validate object format: %w", t, err)
+ }
+
+ err := t.nextTarget.WriteHeader(ctx, obj)
+ if err != nil {
+ return err
+ }
+
+ // update written bytes
+ //
+ // Note: we MUST NOT add obj.PayloadSize() since obj
+ // may carry only a chunk of the full payload
+ t.writtenPayload += chunkLn
+
+ return nil
+}
+
+func (t *validatingPreparedTarget) Write(ctx context.Context, p []byte) (n int, err error) {
+ chunkLn := uint64(len(p))
+
+ // check if new chunk will overflow payload size
+ if t.writtenPayload+chunkLn > t.payloadSz {
+ return 0, ErrWrongPayloadSize
+ }
+
+ _, err = t.hash.Write(p)
+ if err != nil {
+ return
+ }
+
+ n, err = t.nextTarget.Write(ctx, p)
+ if err == nil {
+ t.writtenPayload += uint64(n)
+ }
+
+ return
+}
+
+func (t *validatingPreparedTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
+ // check payload size correctness
+ if t.payloadSz != t.writtenPayload {
+ return nil, ErrWrongPayloadSize
+ }
+
+ if !bytes.Equal(t.hash.Sum(nil), t.checksum) {
+ return nil, fmt.Errorf("(%T) incorrect payload checksum", t)
+ }
+
+ return t.nextTarget.Close(ctx)
+}
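+
+// Flow sketch (illustrative): for prepared objects, WriteHeader picks the
+// SHA256 or TZ hasher from the declared payload checksum and validates the
+// format; Write hashes each chunk and tracks the written byte count; Close
+// fails if the declared size or checksum does not match what was streamed.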
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
new file mode 100644
index 0000000000..6593d3ca08
--- /dev/null
+++ b/pkg/services/object/common/writer/common.go
@@ -0,0 +1,114 @@
+package writer
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.uber.org/zap"
+)
+
+type NodeIterator struct {
+ Traversal
+ cfg *Config
+}
+
+func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
+ return &NodeIterator{
+ Traversal: Traversal{
+ Opts: opts,
+ Exclude: make(map[string]*bool),
+ },
+ cfg: c,
+ }
+}
+
+func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
+ traverser, err := placement.NewTraverser(ctx, n.Opts...)
+ if err != nil {
+ return fmt.Errorf("could not create object placement traverser: %w", err)
+ }
+
+ resErr := &atomic.Value{}
+
+ // We must iterate over all replicas, even when they contain identical nodes.
+ // At the same time, identical nodes must be excluded from repeated processing.
+ for {
+ addrs := traverser.Next()
+ if len(addrs) == 0 {
+ break
+ }
+
+ if n.forEachAddress(ctx, traverser, addrs, f, resErr) {
+ break
+ }
+ }
+
+ if !traverser.Success() {
+ var err errIncompletePut
+ err.singleErr, _ = resErr.Load().(error)
+ return err
+ }
+
+ // perform additional container broadcast if needed
+ if n.submitPrimaryPlacementFinish() {
+ err := n.ForEachNode(ctx, f)
+ if err != nil {
+ n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ // we don't fail primary operation because of broadcast failure
+ }
+ }
+
+ return nil
+}
+
+func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, NodeDescriptor) error, resErr *atomic.Value) bool {
+ var wg sync.WaitGroup
+
+ for _, addr := range addrs {
+ if ok := n.Exclude[string(addr.PublicKey())]; ok != nil {
+ if *ok {
+ traverser.SubmitSuccess()
+ }
+ // This can happen only during additional container broadcast.
+ continue
+ }
+
+ isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey())
+
+ item := new(bool)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
+ if err != nil {
+ resErr.Store(err)
+ svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err)
+ return
+ }
+
+ traverser.SubmitSuccess()
+ *item = true
+ }()
+
+ // Mark the container node as processed in order to exclude it
+ // from the subsequent container broadcast. Note that we don't
+ // process this node during broadcast if primary placement
+ // on it failed.
+ n.submitProcessed(addr, item)
+ }
+
+ wg.Wait()
+
+ return false
+}
+
+func NeedAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
+ return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock))
+}
diff --git a/pkg/services/object/common/writer/dispatcher.go b/pkg/services/object/common/writer/dispatcher.go
new file mode 100644
index 0000000000..bb9a54ce9d
--- /dev/null
+++ b/pkg/services/object/common/writer/dispatcher.go
@@ -0,0 +1,23 @@
+package writer
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+)
+
+var _ transformer.ObjectWriter = (*objectWriterDispatcher)(nil)
+
+type objectWriterDispatcher struct {
+ ecWriter transformer.ObjectWriter
+ repWriter transformer.ObjectWriter
+}
+
+func (m *objectWriterDispatcher) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+ if object.IsECSupported(obj) {
+ return m.ecWriter.WriteObject(ctx, obj)
+ }
+ return m.repWriter.WriteObject(ctx, obj)
+}
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
new file mode 100644
index 0000000000..fff58aca71
--- /dev/null
+++ b/pkg/services/object/common/writer/distributed.go
@@ -0,0 +1,135 @@
+package writer
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+)
+
+type preparedObjectTarget interface {
+ WriteObject(context.Context, *objectSDK.Object, object.ContentMeta) error
+}
+
+type distributedWriter struct {
+ cfg *Config
+
+ placementOpts []placement.Option
+
+ obj *objectSDK.Object
+ objMeta object.ContentMeta
+
+ nodeTargetInitializer func(NodeDescriptor) preparedObjectTarget
+
+ relay func(context.Context, NodeDescriptor) error
+
+ resetSuccessAfterOnBroadcast bool
+}
+
+// Traversal holds traversal parameters and container placement state.
+type Traversal struct {
+ Opts []placement.Option
+
+ // whether an additional broadcast is needed after the object is saved
+ ExtraBroadcastEnabled bool
+
+ // container nodes that were processed during the primary object placement
+ Exclude map[string]*bool
+
+ ResetSuccessAfterOnBroadcast bool
+}
+
+// updates traversal parameters after the primary placement finishes and
+// returns true if an additional container broadcast is needed.
+func (x *Traversal) submitPrimaryPlacementFinish() bool {
+ if x.ExtraBroadcastEnabled {
+ // do not track success during container broadcast (best-effort)
+ x.Opts = append(x.Opts, placement.WithoutSuccessTracking())
+
+ if x.ResetSuccessAfterOnBroadcast {
+ x.Opts = append(x.Opts, placement.ResetSuccessAfter())
+ }
+
+ // avoid 2nd broadcast
+ x.ExtraBroadcastEnabled = false
+
+ return true
+ }
+
+ return false
+}
+
+// marks the container node as processed during the primary object placement.
+func (x *Traversal) submitProcessed(n placement.Node, item *bool) {
+ if x.ExtraBroadcastEnabled {
+ key := string(n.PublicKey())
+
+ if x.Exclude == nil {
+ x.Exclude = make(map[string]*bool, 1)
+ }
+
+ x.Exclude[key] = item
+ }
+}
+
+type NodeDescriptor struct {
+ Local bool
+
+ Info placement.Node
+}
+
+// errIncompletePut is returned if processing on a container fails.
+type errIncompletePut struct {
+ singleErr error // error from the last responding node
+}
+
+func (x errIncompletePut) Error() string {
+ const commonMsg = "incomplete object PUT by placement"
+
+ if x.singleErr != nil {
+ return fmt.Sprintf("%s: %v", commonMsg, x.singleErr)
+ }
+
+ return commonMsg
+}
+
+func (x errIncompletePut) Unwrap() error {
+ return x.singleErr
+}
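+
+// Since errIncompletePut wraps the last node error via Unwrap, callers can
+// reach the cause with the standard errors package (illustrative sketch):
+//
+//	if err := w.WriteObject(ctx, obj); err != nil {
+//		cause := errors.Unwrap(err) // last responding node's error, may be nil
+//		_ = cause
+//	}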
+
+// WriteObject implements the transformer.ObjectWriter interface.
+func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+ t.obj = obj
+
+ var err error
+
+ if t.objMeta, err = t.cfg.FormatValidator.ValidateContent(t.obj); err != nil {
+ return fmt.Errorf("(%T) could not validate payload content: %w", t, err)
+ }
+ return t.iteratePlacement(ctx)
+}
+
+func (t *distributedWriter) sendObject(ctx context.Context, node NodeDescriptor) error {
+ if !node.Local && t.relay != nil {
+ return t.relay(ctx, node)
+ }
+
+ target := t.nodeTargetInitializer(node)
+
+ err := target.WriteObject(ctx, t.obj, t.objMeta)
+ if err != nil {
+ return fmt.Errorf("could not write header: %w", err)
+ }
+ return nil
+}
+
+func (t *distributedWriter) iteratePlacement(ctx context.Context) error {
+ id, _ := t.obj.ID()
+
+ iter := t.cfg.NewNodeIterator(append(t.placementOpts, placement.ForObject(id)))
+ iter.ExtraBroadcastEnabled = NeedAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
+ iter.ResetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast
+ return iter.ForEachNode(ctx, t.sendObject)
+}
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
new file mode 100644
index 0000000000..26a53e3156
--- /dev/null
+++ b/pkg/services/object/common/writer/ec.go
@@ -0,0 +1,355 @@
+package writer
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+var _ transformer.ObjectWriter = (*ECWriter)(nil)
+
+var (
+ errUnsupportedECObject = errors.New("object is not supported for erasure coding")
+ errFailedToSaveAllECParts = errors.New("failed to save all EC parts")
+)
+
+type ECWriter struct {
+ Config *Config
+ PlacementOpts []placement.Option
+ Container containerSDK.Container
+ Key *ecdsa.PrivateKey
+ CommonPrm *svcutil.CommonPrm
+ Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+
+ ObjectMeta object.ContentMeta
+ ObjectMetaValid bool
+
+ remoteRequestSignKey *ecdsa.PrivateKey
+}
+
+func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+ relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj)
+ if err != nil {
+ return err
+ }
+ if relayed {
+ return nil
+ }
+
+ if !object.IsECSupported(obj) {
+ // must be resolved by caller
+ return errUnsupportedECObject
+ }
+
+ if !e.ObjectMetaValid {
+ if e.ObjectMeta, err = e.Config.FormatValidator.ValidateContent(obj); err != nil {
+ return fmt.Errorf("(%T) could not validate payload content: %w", e, err)
+ }
+ e.ObjectMetaValid = true
+ }
+
+ if isContainerNode {
+ restoreTokens := e.CommonPrm.ForgetTokens()
+ defer restoreTokens()
+ // As the request is executed on a container node, sign it with the container key.
+ e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil)
+ if err != nil {
+ return err
+ }
+ } else {
+ e.remoteRequestSignKey = e.Key
+ }
+
+ if obj.ECHeader() != nil {
+ return e.writeECPart(ctx, obj)
+ }
+ return e.writeRawObject(ctx, obj)
+}
+
+func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) {
+ currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx)
+ if err != nil {
+ return false, false, err
+ }
+ if currentNodeIsContainerNode {
+ // the object can be split or saved locally
+ return false, true, nil
+ }
+ if e.Relay == nil {
+ return false, currentNodeIsContainerNode, nil
+ }
+ objID := object.AddressOf(obj).Object()
+ var index uint32
+ if obj.ECHeader() != nil {
+ objID = obj.ECHeader().Parent()
+ index = obj.ECHeader().Index()
+ }
+ if err := e.relayToContainerNode(ctx, objID, index); err != nil {
+ return false, false, err
+ }
+ return true, currentNodeIsContainerNode, nil
+}
+
+func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) {
+ t, err := placement.NewTraverser(ctx, e.PlacementOpts...)
+ if err != nil {
+ return false, err
+ }
+ for {
+ nodes := t.Next()
+ if len(nodes) == 0 {
+ break
+ }
+ for _, node := range nodes {
+ if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
+ return true, nil
+ }
+ }
+ }
+ return false, nil
+}
+
+func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
+ if err != nil {
+ return err
+ }
+ var lastErr error
+ offset := int(index)
+ for {
+ nodes := t.Next()
+ if len(nodes) == 0 {
+ break
+ }
+ for idx := range nodes {
+ node := nodes[(idx+offset)%len(nodes)]
+ var info client.NodeInfo
+ client.NodeInfoFromNetmapElement(&info, node)
+
+ c, err := e.Config.ClientConstructor.Get(info)
+ if err != nil {
+ return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
+ }
+
+ err = e.Relay(ctx, info, c)
+ if err == nil {
+ return nil
+ }
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+ lastErr = err
+ }
+ }
+ if lastErr == nil {
+ return nil
+ }
+ return errIncompletePut{
+ singleErr: lastErr,
+ }
+}
+
+func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error {
+ if e.CommonPrm.LocalOnly() {
+ return e.writePartLocal(ctx, obj)
+ }
+
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
+ if err != nil {
+ return err
+ }
+
+ eg, egCtx := errgroup.WithContext(ctx)
+ for {
+ nodes := t.Next()
+ if len(nodes) == 0 {
+ break
+ }
+
+ eg.Go(func() error {
+ return e.writePart(egCtx, obj, int(obj.ECHeader().Index()), nodes, make([]atomic.Bool, len(nodes)))
+ })
+ t.SubmitSuccess()
+ }
+ if err := eg.Wait(); err != nil {
+ return errIncompletePut{
+ singleErr: err,
+ }
+ }
+ return nil
+}
+
+func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error {
+ // only a single EC policy is supported for now
+ c, err := erasurecode.NewConstructor(policy.ECDataCount(e.Container.PlacementPolicy()), policy.ECParityCount(e.Container.PlacementPolicy()))
+ if err != nil {
+ return err
+ }
+ parts, err := c.Split(obj, e.Key)
+ if err != nil {
+ return err
+ }
+ partsProcessed := make([]atomic.Bool, len(parts))
+ objID, _ := obj.ID()
+ t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
+ if err != nil {
+ return err
+ }
+
+ for {
+ eg, egCtx := errgroup.WithContext(ctx)
+ nodes := t.Next()
+ if len(nodes) == 0 {
+ break
+ }
+
+ visited := make([]atomic.Bool, len(nodes))
+ for idx := range parts {
+ visited[idx%len(nodes)].Store(true)
+ }
+
+ for idx := range parts {
+ if !partsProcessed[idx].Load() {
+ eg.Go(func() error {
+ err := e.writePart(egCtx, parts[idx], idx, nodes, visited)
+ if err == nil {
+ partsProcessed[idx].Store(true)
+ t.SubmitSuccess()
+ }
+ return err
+ })
+ }
+ }
+ err = eg.Wait()
+ }
+ if err != nil {
+ return errIncompletePut{
+ singleErr: err,
+ }
+ }
+ for idx := range partsProcessed {
+ if !partsProcessed[idx].Load() {
+ return errIncompletePut{
+ singleErr: errFailedToSaveAllECParts,
+ }
+ }
+ }
+ return nil
+}
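+
+// Placement sketch (illustrative): for EC n.m an object is split into n+m
+// parts, and part i is first offered to node i mod len(nodes); on failure,
+// writePart falls back to nodes no other part has visited, then to any node
+// the current part has not yet tried. E.g. with EC 2.1 and four nodes,
+// parts 0..2 start at nodes 0..2 and node 3 remains a shared fallback.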
+
+func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ // try to save to the node assigned to the current part index
+ node := nodes[partIdx%len(nodes)]
+ err := e.putECPartToNode(ctx, obj, node)
+ if err == nil {
+ return nil
+ } else if clientSDK.IsErrObjectAlreadyRemoved(err) {
+ return err
+ }
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
+ zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
+
+ partVisited := make([]bool, len(nodes))
+ partVisited[partIdx%len(nodes)] = true
+
+ // try to save to any node not yet visited by any of the other parts
+ for i := 1; i < len(nodes); i++ {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ idx := (partIdx + i) % len(nodes)
+ if !visited[idx].CompareAndSwap(false, true) {
+ continue
+ }
+ node = nodes[idx]
+ err := e.putECPartToNode(ctx, obj, node)
+ if err == nil {
+ return nil
+ }
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
+ zap.String("node", hex.EncodeToString(node.PublicKey())),
+ zap.Error(err))
+
+ partVisited[idx] = true
+ }
+
+ // try to save to any node not yet visited by the current part
+ for i := range nodes {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if partVisited[i] {
+ continue
+ }
+ node = nodes[i]
+ err := e.putECPartToNode(ctx, obj, node)
+ if err == nil {
+ return nil
+ }
+ e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
+ zap.String("node", hex.EncodeToString(node.PublicKey())),
+ zap.Error(err))
+ }
+
+ return fmt.Errorf("failed to save EC chunk %s to any node", object.AddressOf(obj))
+}
+
+func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
+ if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
+ return e.writePartLocal(ctx, obj)
+ }
+ return e.writePartRemote(ctx, obj, node)
+}
+
+func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
+ localTarget := LocalTarget{
+ Storage: e.Config.LocalStore,
+ Container: e.Container,
+ }
+ return localTarget.WriteObject(ctx, obj, e.ObjectMeta)
+}
+
+func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
+ var clientNodeInfo client.NodeInfo
+ client.NodeInfoFromNetmapElement(&clientNodeInfo, node)
+
+ remoteTarget := remoteWriter{
+ privateKey: e.remoteRequestSignKey,
+ clientConstructor: e.Config.ClientConstructor,
+ commonPrm: e.CommonPrm,
+ nodeInfo: clientNodeInfo,
+ }
+
+ return remoteTarget.WriteObject(ctx, obj, e.ObjectMeta)
+}
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
new file mode 100644
index 0000000000..d5eeddf21a
--- /dev/null
+++ b/pkg/services/object/common/writer/ec_test.go
@@ -0,0 +1,190 @@
+package writer
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "slices"
+ "strconv"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+type testPlacementBuilder struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
+ [][]netmap.NodeInfo, error,
+) {
+ arr := slices.Clone(p.vectors[0])
+ return [][]netmap.NodeInfo{arr}, nil
+}
+
+type nmKeys struct{}
+
+func (nmKeys) IsLocalKey(_ []byte) bool {
+ return false
+}
+
+type clientConstructor struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) {
+ if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) ||
+ bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) {
+ return multiAddressClient{err: errors.New("node unavailable")}, nil
+ }
+ return multiAddressClient{}, nil
+}
+
+type multiAddressClient struct {
+ client.MultiAddressClient
+ err error
+}
+
+func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) {
+ if c.err != nil {
+ return nil, c.err
+ }
+ return &apiclient.ResObjectPutSingle{}, nil
+}
+
+func (c multiAddressClient) ReportError(error) {
+}
+
+func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error {
+ return nil
+}
+
+func TestECWriter(t *testing.T) {
+ // Create container with policy EC 1.1
+ cnr := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetECDataCount(1)
+ x1.SetECParityCount(1)
+ p1.AddReplicas(x1)
+ cnr.SetPlacementPolicy(p1)
+ cnr.SetAttribute("cnr", "cnr1")
+
+ cid := cidtest.ID()
+
+ // Create 4 nodes: 2 nodes for chunks and
+ // 2 nodes for the case when the first two fail.
+ ns, _ := testNodeMatrix(t, []int{4})
+
+ data := make([]byte, 100)
+ _, _ = rand.Read(data)
+ ver := version.Current()
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(data))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cid)
+ obj.SetVersion(&ver)
+ obj.SetPayload(data)
+ obj.SetPayloadSize(uint64(len(data)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ // The builder returns nodes without HRW sorting
+ builder := &testPlacementBuilder{
+ vectors: ns,
+ }
+
+ ownerKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ nodeKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ log, err := logger.NewLogger(logger.Prm{})
+ require.NoError(t, err)
+
+ var n nmKeys
+ ecw := ECWriter{
+ Config: &Config{
+ NetmapKeys: n,
+ Logger: log,
+ ClientConstructor: clientConstructor{vectors: ns},
+ KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
+ },
+ PlacementOpts: append(
+ []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},
+ placement.WithCopyNumbers(nil)), // the copies number is ignored for EC
+ Container: cnr,
+ Key: &ownerKey.PrivateKey,
+ Relay: nil,
+ ObjectMetaValid: true,
+ }
+
+ err = ecw.WriteObject(context.Background(), obj)
+ require.NoError(t, err)
+}
+
+func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
+ mNodes := make([][]netmap.NodeInfo, len(dim))
+ mAddr := make([][]string, len(dim))
+
+ for i := range dim {
+ ns := make([]netmap.NodeInfo, dim[i])
+ as := make([]string, dim[i])
+
+ for j := range dim[i] {
+ a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
+ strconv.Itoa(i),
+ strconv.Itoa(60000+j),
+ )
+
+ var ni netmap.NodeInfo
+ ni.SetNetworkEndpoints(a)
+ ni.SetPublicKey([]byte(a))
+
+ var na network.AddressGroup
+
+ err := na.FromIterator(netmapcore.Node(ni))
+ require.NoError(t, err)
+
+ as[j] = network.StringifyGroup(na)
+
+ ns[j] = ni
+ }
+
+ mNodes[i] = ns
+ mAddr[i] = as
+ }
+
+ return mNodes, mAddr
+}
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
new file mode 100644
index 0000000000..cf3d032756
--- /dev/null
+++ b/pkg/services/object/common/writer/local.go
@@ -0,0 +1,55 @@
+package writer
+
+import (
+ "context"
+ "fmt"
+
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+// ObjectStorage is an object storage interface.
+type ObjectStorage interface {
+ // Put must save the passed object
+ // and return any error that occurred.
+ Put(context.Context, *objectSDK.Object, bool) error
+ // Delete must delete the passed objects
+ // and return any error that occurred.
+ Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
+ // Lock must lock the passed objects
+ // and return any error that occurred.
+ Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error
+ // IsLocked must clarify the object's lock status.
+ IsLocked(context.Context, oid.Address) (bool, error)
+}
+
+type LocalTarget struct {
+ Storage ObjectStorage
+ Container containerSDK.Container
+}
+
+func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
+ if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
+ return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
+ }
+
+ switch meta.Type() {
+ case objectSDK.TypeTombstone:
+ err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
+ if err != nil {
+ return fmt.Errorf("could not delete objects from tombstone locally: %w", err)
+ }
+ case objectSDK.TypeLock:
+ err := t.Storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects())
+ if err != nil {
+ return fmt.Errorf("could not lock object from lock objects locally: %w", err)
+ }
+ default:
+ // objects that do not change meta storage
+ }
+
+ return nil
+}
diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/common/writer/remote.go
similarity index 64%
rename from pkg/services/object/put/remote.go
rename to pkg/services/object/common/writer/remote.go
index 0f0dc355af..697613ff78 100644
--- a/pkg/services/object/put/remote.go
+++ b/pkg/services/object/common/writer/remote.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -10,22 +10,19 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
-type remoteTarget struct {
- ctx context.Context
-
+type remoteWriter struct {
privateKey *ecdsa.PrivateKey
commonPrm *util.CommonPrm
nodeInfo clientcore.NodeInfo
- obj *object.Object
-
clientConstructor ClientConstructor
}
@@ -41,38 +38,46 @@ type RemoteSender struct {
type RemotePutPrm struct {
node netmap.NodeInfo
- obj *object.Object
+ obj *objectSDK.Object
}
-func (t *remoteTarget) WriteObject(obj *object.Object, _ objectcore.ContentMeta) error {
- t.obj = obj
-
- return nil
-}
-
-func (t *remoteTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *remoteWriter) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error {
c, err := t.clientConstructor.Get(t.nodeInfo)
if err != nil {
- return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err)
+ return fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err)
}
var prm internalclient.PutObjectPrm
- prm.SetContext(t.ctx)
prm.SetClient(c)
prm.SetPrivateKey(t.privateKey)
prm.SetSessionToken(t.commonPrm.SessionToken())
prm.SetBearerToken(t.commonPrm.BearerToken())
prm.SetXHeaders(t.commonPrm.XHeaders())
- prm.SetObject(t.obj)
+ prm.SetObject(obj)
- res, err := internalclient.PutObject(prm)
- if err != nil {
- return nil, fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
+ err = t.putSingle(ctx, prm)
+ if status.Code(err) != codes.Unimplemented {
+ return err
}
- return new(transformer.AccessIdentifiers).
- WithSelfID(res.ID()), nil
+ return t.putStream(ctx, prm)
+}
+
+func (t *remoteWriter) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error {
+ _, err := internalclient.PutObject(ctx, prm)
+ if err != nil {
+ return fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
+ }
+ return nil
+}
+
+func (t *remoteWriter) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error {
+ _, err := internalclient.PutObjectSingle(ctx, prm)
+ if err != nil {
+ return fmt.Errorf("(%T) could not put single object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
+ }
+ return nil
}
// NewRemoteSender creates, initializes and returns new RemoteSender instance.
@@ -93,7 +98,7 @@ func (p *RemotePutPrm) WithNodeInfo(v netmap.NodeInfo) *RemotePutPrm {
}
// WithObject sets transferred object.
-func (p *RemotePutPrm) WithObject(v *object.Object) *RemotePutPrm {
+func (p *RemotePutPrm) WithObject(v *objectSDK.Object) *RemotePutPrm {
if p != nil {
p.obj = v
}
@@ -108,8 +113,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
return err
}
- t := &remoteTarget{
- ctx: ctx,
+ t := &remoteWriter{
privateKey: key,
clientConstructor: s.clientConstructor,
}
@@ -119,9 +123,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
return fmt.Errorf("parse client node info: %w", err)
}
- if err := t.WriteObject(p.obj, objectcore.ContentMeta{}); err != nil {
- return fmt.Errorf("(%T) could not send object header: %w", s, err)
- } else if _, err := t.Close(); err != nil {
+ if err := t.WriteObject(ctx, p.obj, objectcore.ContentMeta{}); err != nil {
return fmt.Errorf("(%T) could not send object: %w", s, err)
}
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
new file mode 100644
index 0000000000..d3d2b41b43
--- /dev/null
+++ b/pkg/services/object/common/writer/writer.go
@@ -0,0 +1,168 @@
+package writer
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+)
+
+type MaxSizeSource interface {
+ // MaxObjectSize returns the maximum payload size
+ // of a physically stored object in the system.
+ //
+ // Must return 0 if the value cannot be obtained.
+ MaxObjectSize(context.Context) uint64
+}
+
+type ClientConstructor interface {
+ Get(client.NodeInfo) (client.MultiAddressClient, error)
+}
+
+type InnerRing interface {
+ InnerRingKeys(ctx context.Context) ([][]byte, error)
+}
+
+type FormatValidatorConfig interface {
+ VerifySessionTokenIssuer() bool
+}
+
+// Config represents a set of static parameters that are established during
+// the initialization phase of all services.
+type Config struct {
+ KeyStorage *objutil.KeyStorage
+
+ MaxSizeSrc MaxSizeSource
+
+ LocalStore ObjectStorage
+
+ ContainerSource container.Source
+
+ NetmapSource netmap.Source
+
+ NetmapKeys netmap.AnnouncedKeys
+
+ FormatValidator *object.FormatValidator
+
+ NetworkState netmap.State
+
+ ClientConstructor ClientConstructor
+
+ Logger *logger.Logger
+
+ VerifySessionTokenIssuer bool
+}
+
+type Option func(*Config)
+
+func WithLogger(l *logger.Logger) Option {
+ return func(c *Config) {
+ c.Logger = l
+ }
+}
+
+func WithVerifySessionTokenIssuer(v bool) Option {
+ return func(c *Config) {
+ c.VerifySessionTokenIssuer = v
+ }
+}
+
+type Params struct {
+ Config *Config
+
+ Common *objutil.CommonPrm
+
+ Header *objectSDK.Object
+
+ Container containerSDK.Container
+
+ TraverseOpts []placement.Option
+
+ Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+
+ SignRequestPrivateKey *ecdsa.PrivateKey
+}
+
+func New(prm *Params) transformer.ObjectWriter {
+ if container.IsECContainer(prm.Container) && object.IsECSupported(prm.Header) {
+ return newECWriter(prm)
+ }
+ return newDefaultObjectWriter(prm, false)
+}
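+// For illustration, a hedged usage sketch of New; identifiers other than the
+// ones defined in this package are hypothetical:
+//
+//	w := writer.New(&writer.Params{
+//		Config:    cfg,       // *writer.Config assembled by the caller
+//		Common:    commonPrm, // *objutil.CommonPrm from the request
+//		Header:    obj,       // header of the object being written
+//		Container: cnr,
+//	})
+//	// w is an EC writer for EC containers with EC-supported headers,
+//	// and a replication (distributed) writer otherwise.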
+
+func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.ObjectWriter {
+ var relay func(context.Context, NodeDescriptor) error
+ if prm.Relay != nil {
+ relay = func(ctx context.Context, node NodeDescriptor) error {
+ var info client.NodeInfo
+
+ client.NodeInfoFromNetmapElement(&info, node.Info)
+
+ c, err := prm.Config.ClientConstructor.Get(info)
+ if err != nil {
+ return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
+ }
+
+ return prm.Relay(ctx, info, c)
+ }
+ }
+
+ var resetSuccessAfterOnBroadcast bool
+ traverseOpts := prm.TraverseOpts
+ if forECPlacement && !prm.Common.LocalOnly() {
+ // Save non-regular and linking objects to the EC container
+ // as if under replication: EC 2.1 -> REP 2, EC 2.2 -> REP 3, etc.
+ traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.Container.PlacementPolicy())+1)))
+ resetSuccessAfterOnBroadcast = true
+ }
+
+ return &distributedWriter{
+ cfg: prm.Config,
+ placementOpts: traverseOpts,
+ resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast,
+ nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
+ if node.Local {
+ return LocalTarget{
+ Storage: prm.Config.LocalStore,
+ Container: prm.Container,
+ }
+ }
+
+ rt := &remoteWriter{
+ privateKey: prm.SignRequestPrivateKey,
+ commonPrm: prm.Common,
+ clientConstructor: prm.Config.ClientConstructor,
+ }
+
+ client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.Info)
+
+ return rt
+ },
+ relay: relay,
+ }
+}
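+// Worked example for the EC placement tweak above: for policy EC 2.1 the
+// parity count is 1, so SuccessAfter(1+1) = SuccessAfter(2) — the non-regular
+// or linking object is stored as if under REP 2; EC 2.2 gives SuccessAfter(3),
+// i.e. REP 3, matching the rule in the comment above.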
+
+func newECWriter(prm *Params) transformer.ObjectWriter {
+ return &objectWriterDispatcher{
+ ecWriter: &ECWriter{
+ Config: prm.Config,
+ PlacementOpts: append(prm.TraverseOpts, placement.WithCopyNumbers(nil)), // copy numbers are ignored for EC
+ Container: prm.Container,
+ Key: prm.SignRequestPrivateKey,
+ CommonPrm: prm.Common,
+ Relay: prm.Relay,
+ },
+ repWriter: newDefaultObjectWriter(prm, true),
+ }
+}
diff --git a/pkg/services/object/delete/container.go b/pkg/services/object/delete/container.go
deleted file mode 100644
index a2f099d5b1..0000000000
--- a/pkg/services/object/delete/container.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package deletesvc
-
-func (exec *execCtx) executeOnContainer() {
- exec.log.Debug("request is not rolled over to the container")
-}
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 4a9c476d03..57e33fde7a 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -3,6 +3,7 @@ package deletesvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"go.uber.org/zap"
)
@@ -23,39 +24,22 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
exec := &execCtx{
svc: s,
- ctx: ctx,
prm: prm,
}
exec.setLogger(s.log)
- exec.execute()
-
- return exec.statusError.err
+ return exec.execute(ctx)
}
-func (exec *execCtx) execute() {
- exec.log.Debug("serving request...")
+func (exec *execCtx) execute(ctx context.Context) error {
+ exec.log.Debug(ctx, logs.ServingRequest)
- // perform local operation
- exec.executeLocal()
-
- exec.analyzeStatus(true)
-}
-
-func (exec *execCtx) analyzeStatus(execCnr bool) {
- // analyze local result
- switch exec.status {
- case statusOK:
- exec.log.Debug("operation finished successfully")
- default:
- exec.log.Debug("operation finished with error",
- zap.String("error", exec.err.Error()),
- )
-
- if execCnr {
- exec.executeOnContainer()
- exec.analyzeStatus(false)
- }
+ if err := exec.executeLocal(ctx); err != nil {
+ exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ return err
}
+
+ exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ return nil
}
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index 112796f273..a99ba3586d 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -2,60 +2,49 @@ package deletesvc
import (
"context"
+ "errors"
+ "fmt"
+ "slices"
"strconv"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
-type statusError struct {
- status int
- err error
-}
+var errDeleteECChunk = errors.New("invalid operation: delete EC object chunk")
type execCtx struct {
svc *Service
- ctx context.Context
-
prm Prm
- statusError
-
log *logger.Logger
- tombstone *object.Tombstone
+ tombstone *objectSDK.Tombstone
- splitInfo *object.SplitInfo
+ splitInfo *objectSDK.SplitInfo
- tombstoneObj *object.Object
+ tombstoneObj *objectSDK.Object
}
-const (
- statusUndefined int = iota
- statusOK
-)
-
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = &logger.Logger{Logger: l.With(
+ exec.log = l.With(
zap.String("request", "DELETE"),
zap.Stringer("address", exec.address()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )}
+ )
}
-func (exec execCtx) context() context.Context {
- return exec.ctx
-}
-
-func (exec execCtx) isLocal() bool {
+func (exec *execCtx) isLocal() bool {
return exec.prm.common.LocalOnly()
}
@@ -79,136 +68,113 @@ func (exec *execCtx) newAddress(id oid.ID) oid.Address {
return a
}
-func (exec *execCtx) formSplitInfo() bool {
- var err error
+func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
+ obj, err := exec.svc.header.head(ctx, exec)
- exec.splitInfo, err = exec.svc.header.splitInfo(exec)
+ var errSplitInfo *objectSDK.SplitInfoError
+ var errECInfo *objectSDK.ECInfoError
switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not compose split info",
- zap.String("error", err.Error()),
- )
case err == nil:
- exec.status = statusOK
- exec.err = nil
+ if ech := obj.ECHeader(); ech != nil {
+ return errDeleteECChunk
+ }
+ return nil
+ case errors.As(err, &errSplitInfo):
+ exec.splitInfo = errSplitInfo.SplitInfo()
+ exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
+
+ exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
+
+ if err := exec.collectMembers(ctx); err != nil {
+ return err
+ }
+
+ exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected)
+ return nil
+ case errors.As(err, &errECInfo):
+ exec.log.Debug(ctx, logs.DeleteECObjectReceived)
+ return nil
}
- return err == nil
+ if !apiclient.IsErrObjectAlreadyRemoved(err) {
+ // The IsErrObjectAlreadyRemoved check is required because forming the
+ // split info implicitly performs a Head request that may return the
+ // ObjectAlreadyRemoved status, which is not specified for Delete.
+ return err
+ }
+
+ return nil
}
-func (exec *execCtx) collectMembers() (ok bool) {
+func (exec *execCtx) collectMembers(ctx context.Context) error {
if exec.splitInfo == nil {
- exec.log.Debug("no split info, object is PHY")
- return true
+ exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY)
+ return nil
}
+ var err error
if _, withLink := exec.splitInfo.Link(); withLink {
- ok = exec.collectChildren()
+ err = exec.collectChildren(ctx)
}
- if !ok {
+ if err != nil {
if _, withLast := exec.splitInfo.LastPart(); withLast {
- ok = exec.collectChain()
- if !ok {
- return
+ if err := exec.collectChain(ctx); err != nil {
+ return err
}
}
} // might fail if neither the last part nor the linking ID is set?
- return exec.supplementBySplitID()
+ return exec.supplementBySplitID(ctx)
}
-func (exec *execCtx) collectChain() bool {
+func (exec *execCtx) collectChain(ctx context.Context) error {
var chain []oid.ID
- exec.log.Debug("assembling chain...")
+ exec.log.Debug(ctx, logs.DeleteAssemblingChain)
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
- p, err := exec.svc.header.previous(exec, prev)
+ p, err := exec.svc.header.previous(ctx, exec, prev)
+ if err != nil {
+ return fmt.Errorf("get previous split element for %s: %w", prev, err)
+ }
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not get previous split element",
- zap.Stringer("id", prev),
- zap.String("error", err.Error()),
- )
-
- return false
- case err == nil:
- exec.status = statusOK
- exec.err = nil
-
- withPrev = p != nil
- if withPrev {
- prev = *p
- }
+ withPrev = p != nil
+ if withPrev {
+ prev = *p
}
}
exec.addMembers(chain)
-
- return true
+ return nil
}
-func (exec *execCtx) collectChildren() bool {
- exec.log.Debug("collecting children...")
+func (exec *execCtx) collectChildren(ctx context.Context) error {
+ exec.log.Debug(ctx, logs.DeleteCollectingChildren)
- children, err := exec.svc.header.children(exec)
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not collect object children",
- zap.String("error", err.Error()),
- )
-
- return false
- case err == nil:
- exec.status = statusOK
- exec.err = nil
-
- link, _ := exec.splitInfo.Link()
-
- exec.addMembers(append(children, link))
-
- return true
+ children, err := exec.svc.header.children(ctx, exec)
+ if err != nil {
+ return fmt.Errorf("collect children: %w", err)
}
+
+ link, _ := exec.splitInfo.Link()
+ exec.addMembers(append(children, link))
+ return nil
}
-func (exec *execCtx) supplementBySplitID() bool {
- exec.log.Debug("supplement by split ID")
+func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
+ exec.log.Debug(ctx, logs.DeleteSupplementBySplitID)
- chain, err := exec.svc.searcher.splitMembers(exec)
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not search for split chain members",
- zap.String("error", err.Error()),
- )
-
- return false
- case err == nil:
- exec.status = statusOK
- exec.err = nil
-
- exec.addMembers(chain)
-
- return true
+ chain, err := exec.svc.searcher.splitMembers(ctx, exec)
+ if err != nil {
+ return fmt.Errorf("search split chain members: %w", err)
}
+
+ exec.addMembers(chain)
+ return nil
}
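+// To recap the member-collection flow above: collectMembers first tries the
+// linking object's child list (collectChildren); if that fails, it walks the
+// split chain backwards from the last part (collectChain); whenever split info
+// is present it finishes with a split-ID search (supplementBySplitID) to pick
+// up any stragglers. A sketch of the resulting tombstone members, assuming a
+// split object with children c1..cn and linking object l:
+//
+//	members = [target, c1, ..., cn, l] // deduplicated by addMembers below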
func (exec *execCtx) addMembers(incoming []oid.ID) {
@@ -217,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
for i := range members {
for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
if members[i].Equals(incoming[j]) {
- incoming = append(incoming[:j], incoming[j+1:]...)
+ incoming = slices.Delete(incoming, j, j+1)
j--
}
}
@@ -226,63 +192,42 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
exec.tombstone.SetMembers(append(members, incoming...))
}
-func (exec *execCtx) initTombstoneObject() bool {
+func (exec *execCtx) initTombstoneObject() error {
payload, err := exec.tombstone.Marshal()
if err != nil {
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not marshal tombstone structure",
- zap.String("error", err.Error()),
- )
-
- return false
+ return fmt.Errorf("marshal tombstone: %w", err)
}
- exec.tombstoneObj = object.New()
+ exec.tombstoneObj = objectSDK.New()
exec.tombstoneObj.SetContainerID(exec.containerID())
- exec.tombstoneObj.SetType(object.TypeTombstone)
+ exec.tombstoneObj.SetType(objectSDK.TypeTombstone)
exec.tombstoneObj.SetPayload(payload)
tokenSession := exec.commonParameters().SessionToken()
if tokenSession != nil {
issuer := tokenSession.Issuer()
- exec.tombstoneObj.SetOwnerID(&issuer)
+ exec.tombstoneObj.SetOwnerID(issuer)
} else {
// make local node a tombstone object owner
localUser := exec.svc.netInfo.LocalNodeID()
- exec.tombstoneObj.SetOwnerID(&localUser)
+ exec.tombstoneObj.SetOwnerID(localUser)
}
- var a object.Attribute
+ var a objectSDK.Attribute
a.SetKey(objectV2.SysAttributeExpEpoch)
a.SetValue(strconv.FormatUint(exec.tombstone.ExpirationEpoch(), 10))
exec.tombstoneObj.SetAttributes(a)
- return true
+ return nil
}
-func (exec *execCtx) saveTombstone() bool {
- id, err := exec.svc.placer.put(exec)
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not save the tombstone",
- zap.String("error", err.Error()),
- )
-
- return false
- case err == nil:
- exec.status = statusOK
- exec.err = nil
-
- exec.prm.tombAddrWriter.
- SetAddress(exec.newAddress(*id))
+func (exec *execCtx) saveTombstone(ctx context.Context) error {
+ id, err := exec.svc.placer.put(ctx, exec)
+ if err != nil {
+ return fmt.Errorf("save tombstone: %w", err)
}
- return true
+ exec.prm.tombAddrWriter.SetAddress(exec.newAddress(*id))
+ return nil
}
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 36af96448e..01b2d9b3f2 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -1,65 +1,43 @@
package deletesvc
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
)
-func (exec *execCtx) executeLocal() {
- exec.log.Debug("forming tombstone structure...")
+func (exec *execCtx) executeLocal(ctx context.Context) error {
+ exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure)
- ok := exec.formTombstone()
- if !ok {
- return
+ if err := exec.formTombstone(ctx); err != nil {
+ return err
}
- exec.log.Debug("tombstone structure successfully formed, saving...")
+ exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
- exec.saveTombstone()
+ return exec.saveTombstone(ctx)
}
-func (exec *execCtx) formTombstone() (ok bool) {
+func (exec *execCtx) formTombstone(ctx context.Context) error {
tsLifetime, err := exec.svc.netInfo.TombstoneLifetime()
if err != nil {
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not read tombstone lifetime config",
- zap.String("error", err.Error()),
- )
-
- return false
+ return fmt.Errorf("fetch tombstone lifetime: %w", err)
}
- exec.tombstone = object.NewTombstone()
+ exec.tombstone = objectSDK.NewTombstone()
exec.tombstone.SetExpirationEpoch(
exec.svc.netInfo.CurrentEpoch() + tsLifetime,
)
exec.addMembers([]oid.ID{exec.address().Object()})
- exec.log.Debug("forming split info...")
+ exec.log.Debug(ctx, logs.DeleteFormingSplitInfo)
- ok = exec.formSplitInfo()
- if !ok {
- return
+ if err := exec.formExtendedInfo(ctx); err != nil {
+ return fmt.Errorf("form extended info: %w", err)
}
- exec.log.Debug("split info successfully formed, collecting members...")
-
- exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
-
- ok = exec.collectMembers()
- if !ok {
- return
- }
-
- exec.log.Debug("members successfully collected")
-
- ok = exec.initTombstoneObject()
- if !ok {
- return
- }
-
- return true
+ return exec.initTombstoneObject()
}
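+// The local delete flow above, in short: form the tombstone (expiration =
+// current epoch + tombstone lifetime), seed its members with the target
+// address, resolve split/EC info via a raw Head, collect split members if
+// needed, marshal the tombstone into a TypeTombstone object, and save it via
+// the put service. Each step now returns an error instead of the old status
+// flags.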
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index f2ea384de1..1c4d7d5853 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -1,13 +1,15 @@
package deletesvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"go.uber.org/zap"
@@ -25,11 +27,11 @@ type Option func(*cfg)
type NetworkInfo interface {
netmap.State
- // Must return the lifespan of the tombstones
+ // TombstoneLifetime must return the lifespan of the tombstones
// in the FrostFS epochs.
TombstoneLifetime() (uint64, error)
- // Returns user ID of the local storage node. Result must not be nil.
+ // LocalNodeID returns the user ID of the local storage node. The result must not be nil.
// New tombstone objects will have the result as an owner ID if removal is executed w/o a session.
LocalNodeID() user.ID
}
@@ -39,20 +41,20 @@ type cfg struct {
header interface {
// must return (nil, nil) for PHY objects
- splitInfo(*execCtx) (*object.SplitInfo, error)
+ head(context.Context, *execCtx) (*objectSDK.Object, error)
- children(*execCtx) ([]oid.ID, error)
+ children(context.Context, *execCtx) ([]oid.ID, error)
// must return (nil, nil) for 1st object in chain
- previous(*execCtx, oid.ID) (*oid.ID, error)
+ previous(context.Context, *execCtx, oid.ID) (*oid.ID, error)
}
searcher interface {
- splitMembers(*execCtx) ([]oid.ID, error)
+ splitMembers(context.Context, *execCtx) ([]oid.ID, error)
}
placer interface {
- put(*execCtx) (*oid.ID, error)
+ put(context.Context, *execCtx) (*oid.ID, error)
}
netInfo NetworkInfo
@@ -60,16 +62,23 @@ type cfg struct {
keyStorage *util.KeyStorage
}
-func defaultCfg() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
// New creates, initializes and returns utility serving
// Object.Delete service requests.
-func New(opts ...Option) *Service {
- c := defaultCfg()
+func New(gs *getsvc.Service,
+ ss *searchsvc.Service,
+ ps *putsvc.Service,
+ ni NetworkInfo,
+ ks *util.KeyStorage,
+ opts ...Option,
+) *Service {
+ c := &cfg{
+ log: logger.NewLoggerWrapper(zap.L()),
+ header: &headSvcWrapper{s: gs},
+ searcher: &searchSvcWrapper{s: ss},
+ placer: &putSvcWrapper{s: ps},
+ netInfo: ni,
+ keyStorage: ks,
+ }
for i := range opts {
opts[i](c)
@@ -83,42 +92,6 @@ func New(opts ...Option) *Service {
// WithLogger returns option to specify Delete service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Delete service"))}
- }
-}
-
-// WithHeadService returns option to set Head service
-// to work with object headers.
-func WithHeadService(h *getsvc.Service) Option {
- return func(c *cfg) {
- c.header = (*headSvcWrapper)(h)
- }
-}
-
-// WithSearchService returns option to set search service.
-func WithSearchService(s *searchsvc.Service) Option {
- return func(c *cfg) {
- c.searcher = (*searchSvcWrapper)(s)
- }
-}
-
-// WithPutService returns option to specify put service.
-func WithPutService(p *putsvc.Service) Option {
- return func(c *cfg) {
- c.placer = (*putSvcWrapper)(p)
- }
-}
-
-// WithNetworkInfo returns option to set network information source.
-func WithNetworkInfo(netInfo NetworkInfo) Option {
- return func(c *cfg) {
- c.netInfo = netInfo
- }
-}
-
-// WithKeyStorage returns option to set local private key storage.
-func WithKeyStorage(ks *util.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStorage = ks
+ c.log = l
}
}
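+// A hedged construction sketch for the new explicit-dependency constructor;
+// the service variables are hypothetical:
+//
+//	svc := deletesvc.New(getSvc, searchSvc, putSvc, netInfo, keyStorage,
+//		deletesvc.WithLogger(log))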
diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go
index a8ebb30651..a78fd77471 100644
--- a/pkg/services/object/delete/util.go
+++ b/pkg/services/object/delete/util.go
@@ -1,35 +1,46 @@
package deletesvc
import (
- "errors"
+ "context"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
-type headSvcWrapper getsvc.Service
+type headSvcWrapper struct {
+ s *getsvc.Service
+}
-type searchSvcWrapper searchsvc.Service
+type searchSvcWrapper struct {
+ s *searchsvc.Service
+}
-type putSvcWrapper putsvc.Service
+type putSvcWrapper struct {
+ s *putsvc.Service
+}
type simpleIDWriter struct {
ids []oid.ID
}
-func (w *headSvcWrapper) headAddress(exec *execCtx, addr oid.Address) (*object.Object, error) {
+func (w *headSvcWrapper) headAddress(ctx context.Context, exec *execCtx, addr oid.Address) (*objectSDK.Object, error) {
wr := getsvc.NewSimpleObjectWriter()
p := getsvc.HeadPrm{}
- p.SetCommonParameters(exec.commonParameters())
+
+ if cp := exec.commonParameters(); cp != nil {
+ commonParameters := *cp
+ p.SetCommonParameters(&commonParameters)
+ }
+
p.SetHeaderWriter(wr)
p.WithRawFlag(true)
p.WithAddress(addr)
- err := (*getsvc.Service)(w).Head(exec.context(), p)
+ err := w.s.Head(ctx, p)
if err != nil {
return nil, err
}
@@ -37,27 +48,16 @@ func (w *headSvcWrapper) headAddress(exec *execCtx, addr oid.Address) (*object.O
return wr.Object(), nil
}
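+// The copy of CommonPrm above is presumably defensive: headAddress is invoked
+// repeatedly while collecting split members, and handing each Head request its
+// own copy keeps later mutations (e.g. ForgetTokens during assembly) from
+// leaking between calls. This is an inference from the code, not a documented
+// contract.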
-func (w *headSvcWrapper) splitInfo(exec *execCtx) (*object.SplitInfo, error) {
- _, err := w.headAddress(exec, exec.address())
-
- var errSplitInfo *object.SplitInfoError
-
- switch {
- case err == nil:
- return nil, nil
- case errors.As(err, &errSplitInfo):
- return errSplitInfo.SplitInfo(), nil
- default:
- return nil, err
- }
+func (w *headSvcWrapper) head(ctx context.Context, exec *execCtx) (*objectSDK.Object, error) {
+ return w.headAddress(ctx, exec, exec.address())
}
-func (w *headSvcWrapper) children(exec *execCtx) ([]oid.ID, error) {
+func (w *headSvcWrapper) children(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
link, _ := exec.splitInfo.Link()
a := exec.newAddress(link)
- linking, err := w.headAddress(exec, a)
+ linking, err := w.headAddress(ctx, exec, a)
if err != nil {
return nil, err
}
@@ -65,10 +65,10 @@ func (w *headSvcWrapper) children(exec *execCtx) ([]oid.ID, error) {
return linking.Children(), nil
}
-func (w *headSvcWrapper) previous(exec *execCtx, id oid.ID) (*oid.ID, error) {
+func (w *headSvcWrapper) previous(ctx context.Context, exec *execCtx, id oid.ID) (*oid.ID, error) {
a := exec.newAddress(id)
- h, err := w.headAddress(exec, a)
+ h, err := w.headAddress(ctx, exec, a)
if err != nil {
return nil, err
}
@@ -81,9 +81,9 @@ func (w *headSvcWrapper) previous(exec *execCtx, id oid.ID) (*oid.ID, error) {
return nil, nil
}
-func (w *searchSvcWrapper) splitMembers(exec *execCtx) ([]oid.ID, error) {
- fs := object.SearchFilters{}
- fs.AddSplitIDFilter(object.MatchStringEqual, exec.splitInfo.SplitID())
+func (w *searchSvcWrapper) splitMembers(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddSplitIDFilter(objectSDK.MatchStringEqual, exec.splitInfo.SplitID())
wr := new(simpleIDWriter)
@@ -93,7 +93,7 @@ func (w *searchSvcWrapper) splitMembers(exec *execCtx) ([]oid.ID, error) {
p.WithContainerID(exec.containerID())
p.WithSearchFilters(fs)
- err := (*searchsvc.Service)(w).Search(exec.context(), p)
+ err := w.s.Search(ctx, p)
if err != nil {
return nil, err
}
@@ -107,8 +107,8 @@ func (s *simpleIDWriter) WriteIDs(ids []oid.ID) error {
return nil
}
-func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
- streamer, err := (*putsvc.Service)(w).Put(exec.context())
+func (w *putSvcWrapper) put(ctx context.Context, exec *execCtx) (*oid.ID, error) {
+ streamer, err := w.s.Put()
if err != nil {
return nil, err
}
@@ -119,17 +119,17 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
WithCommonPrm(exec.commonParameters()).
WithObject(exec.tombstoneObj.CutPayload())
- err = streamer.Init(initPrm)
+ err = streamer.Init(ctx, initPrm)
if err != nil {
return nil, err
}
- err = streamer.SendChunk(new(putsvc.PutChunkPrm).WithChunk(payload))
+ err = streamer.SendChunk(ctx, new(putsvc.PutChunkPrm).WithChunk(payload))
if err != nil {
return nil, err
}
- r, err := streamer.Close()
+ r, err := streamer.Close(ctx)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/delete/v2/service.go b/pkg/services/object/delete/v2/service.go
index 51759c5df5..7146f0361a 100644
--- a/pkg/services/object/delete/v2/service.go
+++ b/pkg/services/object/delete/v2/service.go
@@ -3,32 +3,19 @@ package deletesvc
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Delete operation of Object service v2.
type Service struct {
- *cfg
-}
-
-// Option represents Service constructor option.
-type Option func(*cfg)
-
-type cfg struct {
svc *deletesvc.Service
}
// NewService constructs Service instance from provided options.
-func NewService(opts ...Option) *Service {
- c := new(cfg)
-
- for i := range opts {
- opts[i](c)
- }
-
+func NewService(svc *deletesvc.Service) *Service {
return &Service{
- cfg: c,
+ svc: svc,
}
}
@@ -51,9 +38,3 @@ func (s *Service) Delete(ctx context.Context, req *objectV2.DeleteRequest) (*obj
return resp, nil
}
-
-func WithInternalService(v *deletesvc.Service) Option {
- return func(c *cfg) {
- c.svc = v
- }
-}
diff --git a/pkg/services/object/delete/v2/util.go b/pkg/services/object/delete/v2/util.go
index d0db1f543d..c57d4562a8 100644
--- a/pkg/services/object/delete/v2/util.go
+++ b/pkg/services/object/delete/v2/util.go
@@ -4,10 +4,10 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index ed77b5693b..e801324896 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -4,15 +4,16 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
-func (exec *execCtx) assemble() {
- if !exec.canAssemble() {
- exec.log.Debug("can not assemble the object")
+func (r *request) assemble(ctx context.Context) {
+ if !r.canAssembleComplexObject() {
+ r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
return
}
@@ -27,67 +28,65 @@ func (exec *execCtx) assemble() {
// - since the introduction of the request forwarding mechanism, the
// assembly process is expected to be handled on a container node only;
// such a node should have enough rights to get any child object by design.
- exec.prm.common.ForgetTokens()
+ r.prm.common.ForgetTokens()
// Do not use forwarding during the assembly stage.
// The request forwarding closure is inherited by the produced
// `execCtx`, so it should be disabled there.
- exec.disableForwarding()
+ r.disableForwarding()
- exec.log.Debug("trying to assemble the object...")
+ r.log.Debug(ctx, logs.GetTryingToAssembleTheObject)
- assembler := newAssembler(exec.address(), exec.splitInfo(), exec.ctxRange(), exec)
+ r.prm.common = r.prm.common.WithLocalOnly(false)
+ assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
- exec.log.Debug("assembling splitted object...",
- zap.Stringer("address", exec.address()),
- zap.Uint64("range_offset", exec.ctxRange().GetOffset()),
- zap.Uint64("range_length", exec.ctxRange().GetLength()),
+ r.log.Debug(ctx, logs.GetAssemblingSplittedObject,
+ zap.Uint64("range_offset", r.ctxRange().GetOffset()),
+ zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer exec.log.Debug("assembling splitted object completed",
- zap.Stringer("address", exec.address()),
- zap.Uint64("range_offset", exec.ctxRange().GetOffset()),
- zap.Uint64("range_length", exec.ctxRange().GetLength()),
+ defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted,
+ zap.Uint64("range_offset", r.ctxRange().GetOffset()),
+ zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- obj, err := assembler.Assemble(exec.context(), exec.prm.objWriter)
+ obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil {
- exec.log.Warn("failed to assemble splitted object",
+ r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject,
zap.Error(err),
- zap.Stringer("address", exec.address()),
- zap.Uint64("range_offset", exec.ctxRange().GetOffset()),
- zap.Uint64("range_length", exec.ctxRange().GetLength()),
+ zap.Uint64("range_offset", r.ctxRange().GetOffset()),
+ zap.Uint64("range_length", r.ctxRange().GetLength()),
)
}
var errSplitInfo *objectSDK.SplitInfoError
var errRemovedRemote *apistatus.ObjectAlreadyRemoved
var errOutOfRangeRemote *apistatus.ObjectOutOfRange
- var errRemovedLocal apistatus.ObjectAlreadyRemoved
- var errOutOfRangeLocal apistatus.ObjectOutOfRange
+ var errRemovedLocal *apistatus.ObjectAlreadyRemoved
+ var errOutOfRangeLocal *apistatus.ObjectOutOfRange
switch {
default:
- exec.status = statusUndefined
- exec.err = err
+ r.status = statusUndefined
+ r.err = err
case err == nil:
- exec.status = statusOK
- exec.err = nil
- exec.collectedObject = obj
+ r.status = statusOK
+ r.err = nil
+ r.collectedObject = obj
case errors.As(err, &errRemovedRemote):
- exec.status = statusINHUMED
- exec.err = errRemovedRemote
+ r.status = statusINHUMED
+ r.err = errRemovedRemote
case errors.As(err, &errRemovedLocal):
- exec.status = statusINHUMED
- exec.err = errRemovedLocal
+ r.status = statusINHUMED
+ r.err = errRemovedLocal
case errors.As(err, &errSplitInfo):
- exec.status = statusVIRTUAL
- exec.err = errSplitInfo
+ r.status = statusVIRTUAL
+ r.err = errSplitInfo
case errors.As(err, &errOutOfRangeRemote):
- exec.status = statusOutOfRange
- exec.err = errOutOfRangeRemote
+ r.status = statusOutOfRange
+ r.err = errOutOfRangeRemote
case errors.As(err, &errOutOfRangeLocal):
- exec.status = statusOutOfRange
- exec.err = errOutOfRangeLocal
+ r.status = statusOutOfRange
+ r.err = errOutOfRangeLocal
}
}
@@ -95,43 +94,57 @@ func equalAddresses(a, b oid.Address) bool {
return a.Container().Equals(b.Container()) && a.Object().Equals(b.Object())
}
-func (exec *execCtx) HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error) {
- p := exec.prm
- p.common = p.common.WithLocalOnly(false)
- p.addr.SetContainer(exec.containerID())
- p.addr.SetObject(id)
-
- prm := HeadPrm{
- commonPrm: p.commonPrm,
- }
-
+func (r *request) HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error) {
w := NewSimpleObjectWriter()
- prm.SetHeaderWriter(w)
- err := exec.svc.Head(exec.context(), prm)
+ p := RequestParameters{}
+ p.common = p.common.WithLocalOnly(false)
+ p.addr.SetContainer(r.containerID())
+ p.addr.SetObject(id)
+ p.head = true
+ p.SetHeaderWriter(w)
- if err != nil {
+ if err := r.getObjectWithIndependentRequest(ctx, p); err != nil {
return nil, err
}
return w.Object(), nil
}
-func (exec *execCtx) GetObject(ctx context.Context, id oid.ID, rng *objectSDK.Range) (*objectSDK.Object, error) {
- w := NewSimpleObjectWriter()
+func (r *request) GetObjectAndWritePayload(ctx context.Context, id oid.ID, rng *objectSDK.Range, writer ChunkWriter) (*objectSDK.Object, error) {
+ w := &payloadWriter{
+ origin: writer,
+ }
- p := exec.prm
- p.common = p.common.WithLocalOnly(false)
+ p := r.prm
p.objWriter = w
- p.SetRange(rng)
+ p.rng = rng
- p.addr.SetContainer(exec.containerID())
+ p.addr.SetContainer(r.containerID())
p.addr.SetObject(id)
- statusError := exec.svc.get(exec.context(), p.commonPrm, withPayloadRange(rng))
-
- if statusError.err != nil {
- return nil, statusError.err
+ if err := r.getObjectWithIndependentRequest(ctx, p); err != nil {
+ return nil, err
}
- return w.Object(), nil
+ return w.obj, nil
+}
+
+func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm RequestParameters) error {
+ detachedExecutor := &request{
+ keyStore: r.keyStore,
+ traverserGenerator: r.traverserGenerator,
+ remoteStorageConstructor: r.remoteStorageConstructor,
+ epochSource: r.epochSource,
+ localStorage: r.localStorage,
+ containerSource: r.containerSource,
+
+ prm: prm,
+ infoSplit: objectSDK.NewSplitInfo(),
+ infoEC: newECInfo(),
+ log: r.log,
+ }
+
+ detachedExecutor.execute(ctx)
+
+ return detachedExecutor.err
}
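+// The "detached executor" above re-runs the whole request pipeline with a
+// fresh request value, so the child lookup's status/err fields cannot clobber
+// the parent request's state; only the final error is surfaced to the caller.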
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
new file mode 100644
index 0000000000..59dd7fd93e
--- /dev/null
+++ b/pkg/services/object/get/assembleec.go
@@ -0,0 +1,88 @@
+package getsvc
+
+import (
+ "context"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.uber.org/zap"
+)
+
+func (r *request) assembleEC(ctx context.Context) {
+ if r.isRaw() {
+ r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
+ return
+ }
+
+ // No access tokens are expected to be used in the assembly process:
+ // - the API protocol does not require child objects to be listed in a
+ // session/bearer token for `GET`/`GETRANGE`/`RANGEHASH` requests, so
+ // their absence in the original request should not be considered an
+ // error; on the other hand, without a session for every child object,
+ // it is impossible to attach a bearer token to the newly generated
+ // requests correctly, because the token has not been issued for that
+ // node's key;
+ // - since the introduction of the request forwarding mechanism, the
+ // assembly process is expected to be handled on a container node only;
+ // such a node should have enough rights to get any child object by design.
+ r.prm.common.ForgetTokens()
+
+ // Do not use forwarding during the assembly stage.
+ // The request forwarding closure is inherited by the produced
+ // `execCtx`, so it should be disabled there.
+ r.disableForwarding()
+
+ r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
+
+ // initialize epoch number
+ ok := r.initEpoch(ctx)
+ if !ok {
+ return
+ }
+
+ r.prm.common = r.prm.common.WithLocalOnly(false)
+ assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
+
+ r.log.Debug(ctx, logs.GetAssemblingECObject,
+ zap.Uint64("range_offset", r.ctxRange().GetOffset()),
+ zap.Uint64("range_length", r.ctxRange().GetLength()),
+ )
+ defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted,
+ zap.Uint64("range_offset", r.ctxRange().GetOffset()),
+ zap.Uint64("range_length", r.ctxRange().GetLength()),
+ )
+
+ obj, err := assembler.Assemble(ctx, r.prm.objWriter)
+ if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) {
+ r.log.Warn(ctx, logs.GetFailedToAssembleECObject,
+ zap.Error(err),
+ zap.Uint64("range_offset", r.ctxRange().GetOffset()),
+ zap.Uint64("range_length", r.ctxRange().GetLength()),
+ )
+ }
+
+ var errRemoved *apistatus.ObjectAlreadyRemoved
+ var errOutOfRange *apistatus.ObjectOutOfRange
+ var errECInfo *objectSDK.ECInfoError
+
+ switch {
+ default:
+ r.status = statusUndefined
+ r.err = err
+ case err == nil:
+ r.status = statusOK
+ r.err = nil
+ r.collectedObject = obj
+ case errors.As(err, &errRemoved):
+ r.status = statusINHUMED
+ r.err = errRemoved
+ case errors.As(err, &errOutOfRange):
+ r.status = statusOutOfRange
+ r.err = errOutOfRange
+ case errors.As(err, &errECInfo):
+ r.status = statusEC
+ r.err = err
+ }
+}
diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go
index 4ae1981b18..b24c9417ba 100644
--- a/pkg/services/object/get/assembler.go
+++ b/pkg/services/object/get/assembler.go
@@ -2,7 +2,7 @@ package getsvc
import (
"context"
- "errors"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -11,19 +11,16 @@ import (
)
type objectGetter interface {
- GetObject(ctx context.Context, id oid.ID, rng *objectSDK.Range) (*objectSDK.Object, error)
+ GetObjectAndWritePayload(ctx context.Context, id oid.ID, rng *objectSDK.Range, writer ChunkWriter) (*objectSDK.Object, error)
HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error)
}
-var (
- errParentAddressDiffers = errors.New("parent address in child object differs")
-)
-
type assembler struct {
addr oid.Address
splitInfo *objectSDK.SplitInfo
rng *objectSDK.Range
objGetter objectGetter
+ head bool
currentOffset uint64
@@ -34,18 +31,24 @@ func newAssembler(
addr oid.Address,
splitInfo *objectSDK.SplitInfo,
rng *objectSDK.Range,
- objGetter objectGetter) *assembler {
+ objGetter objectGetter,
+ head bool,
+) *assembler {
return &assembler{
addr: addr,
rng: rng,
splitInfo: splitInfo,
objGetter: objGetter,
+ head: head,
}
}
// Assemble assembles a split large object and writes its content to ObjectWriter.
// It returns the parent object.
func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ if a.head {
+ return a.assembleHeader(ctx, writer)
+ }
sourceObjectID, ok := a.getLastPartOrLinkObjectID()
if !ok {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
@@ -57,15 +60,23 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS
if previousID == nil && len(childrenIDs) == 0 {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
}
+
if len(childrenIDs) > 0 {
- if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil {
- return nil, err
+ if a.rng != nil {
+ err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer)
+ } else {
+ err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer)
}
} else {
- if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil {
- return nil, err
+ if a.rng != nil {
+ err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer)
+ } else {
+ err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer)
}
}
+ if err != nil {
+ return nil, err
+ }
return a.parentObject, nil
}
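+// Dispatch summary for Assemble: head-only requests go to assembleHeader
+// (assembler_head.go); ranged reads use the *Range variants
+// (assembler_range.go); full reads walk the children list when the linking
+// object is available, and otherwise the reverse chain from the last part.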
@@ -82,14 +93,16 @@ func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) {
}
func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID) (*oid.ID, []oid.ID, error) {
- sourceObject, err := a.getChildObject(ctx, id, nil, true)
+ w := NewSimpleObjectWriter()
+ sourceObject, err := a.getChildObject(ctx, id, nil, true, w)
if err != nil {
return nil, nil, err
}
+ sourceObject.SetPayload(w.pld)
parentObject := sourceObject.Parent()
if parentObject == nil {
- return nil, nil, errors.New("received child with empty parent")
+ return nil, nil, errChildWithEmptyParent
}
a.parentObject = parentObject
@@ -116,7 +129,7 @@ func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID)
}
to := uint64(0)
- if seekOff+seekLen > a.currentOffset+from {
+ if seekOff+seekLen >= a.currentOffset+from {
to = seekOff + seekLen - a.currentOffset
}
@@ -136,8 +149,8 @@ func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID)
return nil, sourceObject.Children(), nil
}
-func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSDK.Range, verifyIsChild bool) (*objectSDK.Object, error) {
- obj, err := a.objGetter.GetObject(ctx, id, rng)
+func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSDK.Range, verifyIsChild bool, writer ChunkWriter) (*objectSDK.Object, error) {
+ obj, err := a.objGetter.GetObjectAndWritePayload(ctx, id, rng, writer)
if err != nil {
return nil, err
}
@@ -149,29 +162,16 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD
}
func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if a.rng == nil {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
- }
- return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true)
- }
-
- if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
return err
}
- if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil {
- return err
- }
- return nil
+ return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true)
}
func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if a.rng == nil {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
- }
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
}
-
if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil {
return err
}
@@ -181,44 +181,24 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev
return nil
}
-func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error {
- withRng := len(partRanges) > 0 && a.rng != nil
-
+func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error {
for i := range partIDs {
- var r *objectSDK.Range
- if withRng {
- r = &partRanges[i]
- }
-
- child, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild)
+ _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer)
if err != nil {
return err
}
-
- if err := writer.WriteChunk(ctx, child.Payload()); err != nil {
- return err
- }
}
return nil
}
func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, rngs, err := a.buildChain(ctx, prevID)
+ chain, err := a.buildChain(ctx, prevID)
if err != nil {
return err
}
- reverseRngs := len(rngs) > 0
-
- for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 {
- chain[left], chain[right] = chain[right], chain[left]
-
- if reverseRngs {
- rngs[left], rngs[right] = rngs[right], rngs[left]
- }
- }
-
- return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false)
+ slices.Reverse(chain)
+ return a.assemblePayloadByObjectIDs(ctx, writer, chain, false)
}
func (a *assembler) isChild(obj *objectSDK.Object) bool {
@@ -226,63 +206,28 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool {
return parent == nil || equalAddresses(a.addr, object.AddressOf(parent))
}
-func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
+func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) {
var (
chain []oid.ID
- rngs []objectSDK.Range
- from = a.rng.GetOffset()
- to = from + a.rng.GetLength()
hasPrev = true
)
// fill the chain end-to-start
for hasPrev {
- // check that only for "range" requests,
- // for `GET` it stops via the false `withPrev`
- if a.rng != nil && a.currentOffset <= from {
- break
- }
-
head, err := a.objGetter.HeadObject(ctx, prevID)
if err != nil {
- return nil, nil, err
+ return nil, err
}
if !a.isChild(head) {
- return nil, nil, errParentAddressDiffers
+ return nil, errParentAddressDiffers
}
- if a.rng != nil {
- sz := head.PayloadSize()
-
- a.currentOffset -= sz
-
- if a.currentOffset < to {
- off := uint64(0)
- if from > a.currentOffset {
- off = from - a.currentOffset
- sz -= from - a.currentOffset
- }
-
- if to < a.currentOffset+off+sz {
- sz = to - off - a.currentOffset
- }
-
- index := len(rngs)
- rngs = append(rngs, objectSDK.Range{})
- rngs[index].SetOffset(off)
- rngs[index].SetLength(sz)
-
- id, _ := head.ID()
- chain = append(chain, id)
- }
- } else {
- id, _ := head.ID()
- chain = append(chain, id)
- }
+ id, _ := head.ID()
+ chain = append(chain, id)
prevID, hasPrev = head.PreviousID()
}
- return chain, rngs, nil
+ return chain, nil
}
diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go
new file mode 100644
index 0000000000..ff213cb821
--- /dev/null
+++ b/pkg/services/object/get/assembler_head.go
@@ -0,0 +1,45 @@
+package getsvc
+
+import (
+ "context"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ var sourceObjectIDs []oid.ID
+ sourceObjectID, ok := a.splitInfo.Link()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ sourceObjectID, ok = a.splitInfo.LastPart()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ if len(sourceObjectIDs) == 0 {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ for _, sourceObjectID = range sourceObjectIDs {
+ obj, err := a.getParent(ctx, sourceObjectID, writer)
+ if err == nil {
+ return obj, nil
+ }
+ }
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+}
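+// assembleHeader tries candidate source objects in order — the linking object
+// first, then the last part — and falls back to SplitInfoError if no candidate
+// yields a parent header; the first candidate that heads successfully
+// short-circuits the loop.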
+
+func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
+ if err != nil {
+ return nil, err
+ }
+ parent := obj.Parent()
+ if parent == nil {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ if err := writer.WriteHeader(ctx, parent); err != nil {
+ return nil, err
+ }
+ return obj, nil
+}
diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go
new file mode 100644
index 0000000000..780693c409
--- /dev/null
+++ b/pkg/services/object/get/assembler_range.go
@@ -0,0 +1,87 @@
+package getsvc
+
+import (
+ "context"
+ "slices"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
+ if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
+ return err
+ }
+ return writer.WriteChunk(ctx, a.parentObject.Payload())
+}
+
+func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
+ if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil {
+ return err
+ }
+ if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part
+ return err
+ }
+ return nil
+}
+
+func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error {
+ for i := range partIDs {
+ _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
+ chain, rngs, err := a.buildChainRange(ctx, prevID)
+ if err != nil {
+ return err
+ }
+
+ slices.Reverse(chain)
+ slices.Reverse(rngs)
+ return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs)
+}
+
+func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
+ var (
+ chain []oid.ID
+ rngs []objectSDK.Range
+ from = a.rng.GetOffset()
+ to = from + a.rng.GetLength()
+
+ hasPrev = true
+ )
+
+ // fill the chain end-to-start
+ for hasPrev && from < a.currentOffset {
+ head, err := a.objGetter.HeadObject(ctx, prevID)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !a.isChild(head) {
+ return nil, nil, errParentAddressDiffers
+ }
+
+ nextOffset := a.currentOffset - head.PayloadSize()
+ clampedFrom := max(from, nextOffset)
+ clampedTo := min(to, a.currentOffset)
+ if clampedFrom < clampedTo {
+ index := len(rngs)
+ rngs = append(rngs, objectSDK.Range{})
+ rngs[index].SetOffset(clampedFrom - nextOffset)
+ rngs[index].SetLength(clampedTo - clampedFrom)
+
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
+
+ a.currentOffset = nextOffset
+ prevID, hasPrev = head.PreviousID()
+ }
+
+ return chain, rngs, nil
+}
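+// Worked example for buildChainRange, assuming three 10-byte parts, a
+// currentOffset initialized to the parent payload size (30), and a requested
+// range [12, 27):
+//
+//	part3: nextOffset=20, clamp to [20,27) -> in-part range {off: 0, len: 7}
+//	part2: nextOffset=10, clamp to [12,20) -> in-part range {off: 2, len: 8}
+//	part1: never headed, the loop exits since from(12) >= currentOffset(10)
+//
+// After slices.Reverse the caller reads part2[2:10], then part3[0:7].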
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
new file mode 100644
index 0000000000..e0a7e1da6f
--- /dev/null
+++ b/pkg/services/object/get/assemblerec.go
@@ -0,0 +1,297 @@
+package getsvc
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+var errECPartsRetrieveCompleted = errors.New("EC parts receive completed")
+
+type ecRemoteStorage interface {
+ getObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo) (*objectSDK.Object, error)
+ headObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo, raw bool) (*objectSDK.Object, error)
+}
+
+type assemblerec struct {
+ addr oid.Address
+ ecInfo *ecInfo
+ rng *objectSDK.Range
+ remoteStorage ecRemoteStorage
+ localStorage localStorage
+ log *logger.Logger
+ head bool
+ traverserGenerator traverserGenerator
+ epoch uint64
+}
+
+func newAssemblerEC(
+ addr oid.Address,
+ ecInfo *ecInfo,
+ rng *objectSDK.Range,
+ remoteStorage ecRemoteStorage,
+ localStorage localStorage,
+ log *logger.Logger,
+ head bool,
+ tg traverserGenerator,
+ epoch uint64,
+) *assemblerec {
+ return &assemblerec{
+ addr: addr,
+ rng: rng,
+ ecInfo: ecInfo,
+ remoteStorage: remoteStorage,
+ localStorage: localStorage,
+ log: log,
+ head: head,
+ traverserGenerator: tg,
+ epoch: epoch,
+ }
+}
+
+// Assemble assembles an erasure-coded object and writes its content to ObjectWriter.
+// It returns the parent object.
+func (a *assemblerec) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ switch {
+ case a.head:
+ return a.reconstructHeader(ctx, writer)
+ case a.rng != nil:
+ return a.reconstructRange(ctx, writer)
+ default:
+ return a.reconstructObject(ctx, writer)
+ }
+}
+
+func (a *assemblerec) getConstructor(cnr *container.Container) (*erasurecode.Constructor, error) {
+ dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy())
+ parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy())
+ return erasurecode.NewConstructor(dataCount, parityCount)
+}
+
+func (a *assemblerec) reconstructHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.reconstructObjectFromParts(ctx, true)
+ if err == nil {
+ return obj, writer.WriteHeader(ctx, obj)
+ }
+ return nil, err
+}
+
+func (a *assemblerec) reconstructRange(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.reconstructObjectFromParts(ctx, false)
+ if err != nil {
+ return nil, err
+ }
+
+ from := a.rng.GetOffset()
+ to := from + a.rng.GetLength()
+ if pLen := uint64(len(obj.Payload())); to < from || pLen < from || pLen < to {
+ return nil, &apistatus.ObjectOutOfRange{}
+ }
+ err = writer.WriteChunk(ctx, obj.Payload()[from:to])
+ if err != nil {
+ return nil, err
+ }
+ return obj, err
+}
+
+func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.reconstructObjectFromParts(ctx, false)
+ if err == nil {
+ err = writer.WriteHeader(ctx, obj.CutPayload())
+ if err == nil {
+ err = writer.WriteChunk(ctx, obj.Payload())
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return obj, err
+}
+
+func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) {
+ objID := a.addr.Object()
+ trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch)
+ if err != nil {
+ return nil, err
+ }
+ c, err := a.getConstructor(cnr)
+ if err != nil {
+ return nil, err
+ }
+ parts := a.retrieveParts(ctx, trav, cnr)
+ if headers {
+ return c.ReconstructHeader(parts)
+ }
+ return c.Reconstruct(parts)
+}
+
+func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Traverser, cnr *container.Container) []*objectSDK.Object {
+ dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy())
+ parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy())
+
+ remoteNodes := make([]placement.Node, 0)
+ for {
+ batch := trav.Next()
+ if len(batch) == 0 {
+ break
+ }
+ remoteNodes = append(remoteNodes, batch...)
+ }
+
+ parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount)
+ if err != nil {
+ a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err))
+ }
+ return parts
+}
+
+func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placement.Node, dataCount, parityCount int) ([]*objectSDK.Object, error) {
+ foundChunks := make(map[uint32]*objectSDK.Object)
+ var foundChunksGuard sync.Mutex
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.SetLimit(dataCount)
+
+ for _, ch := range a.ecInfo.localChunks {
+ eg.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ object := a.tryGetChunkFromLocalStorage(ctx, ch)
+ if object == nil {
+ return nil
+ }
+ foundChunksGuard.Lock()
+ foundChunks[ch.Index] = object
+ count := len(foundChunks)
+ foundChunksGuard.Unlock()
+ if count >= dataCount {
+ return errECPartsRetrieveCompleted
+ }
+ return nil
+ })
+ }
+
+ for _, node := range nodes {
+ var info client.NodeInfo
+ client.NodeInfoFromNetmapElement(&info, node)
+ eg.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ chunks := a.tryGetChunkListFromNode(ctx, info)
+ for _, ch := range chunks {
+ object := a.tryGetChunkFromRemoteStorage(ctx, info, ch)
+ if object == nil {
+ continue
+ }
+ foundChunksGuard.Lock()
+ foundChunks[ch.Index] = object
+ count := len(foundChunks)
+ foundChunksGuard.Unlock()
+ if count >= dataCount {
+ return errECPartsRetrieveCompleted
+ }
+ }
+ return nil
+ })
+ }
+ err := eg.Wait()
+ if err == nil || errors.Is(err, errECPartsRetrieveCompleted) {
+ parts := make([]*objectSDK.Object, dataCount+parityCount)
+ for idx, chunk := range foundChunks {
+ parts[idx] = chunk
+ }
+ return parts, nil
+ }
+ return nil, err
+}
+
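+// tryGetChunkFromLocalStorage reads a single EC chunk from local storage,
+// logging errors and returning nil so the caller can try other sources.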
+func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch objectSDK.ECChunk) *objectSDK.Object {
+ var objID oid.ID
+ err := objID.ReadFromV2(ch.ID)
+ if err != nil {
+ a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ return nil
+ }
+ var addr oid.Address
+ addr.SetContainer(a.addr.Container())
+ addr.SetObject(objID)
+ var object *objectSDK.Object
+ if a.head {
+ object, err = a.localStorage.Head(ctx, addr, false)
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ }
+ } else {
+ object, err = a.localStorage.Get(ctx, addr)
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+ }
+ }
+ return object
+}
+
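+// tryGetChunkListFromNode returns the list of EC chunks stored on the given
+// node, using already collected EC info when available and falling back to a
+// raw HEAD request otherwise.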
+func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.NodeInfo) []objectSDK.ECChunk {
+ if chunks, found := a.ecInfo.remoteChunks[string(node.PublicKey())]; found {
+ return chunks
+ }
+ var errECInfo *objectSDK.ECInfoError
+ _, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true)
+ if err == nil {
+ a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
+ return nil
+ }
+ if !errors.As(err, &errECInfo) {
+ a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
+ return nil
+ }
+ result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks))
+ for _, ch := range errECInfo.ECInfo().Chunks {
+ result = append(result, objectSDK.ECChunk(ch))
+ }
+ return result
+}
+
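+// tryGetChunkFromRemoteStorage fetches a single EC chunk from the given remote
+// node, logging errors and returning nil on failure.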
+func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node client.NodeInfo, ch objectSDK.ECChunk) *objectSDK.Object {
+ var objID oid.ID
+ err := objID.ReadFromV2(ch.ID)
+ if err != nil {
+ a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+ return nil
+ }
+ var addr oid.Address
+ addr.SetContainer(a.addr.Container())
+ addr.SetObject(objID)
+ var object *objectSDK.Object
+ if a.head {
+ object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false)
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ }
+ } else {
+ object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node)
+ if err != nil && !errors.Is(err, context.Canceled) {
+ a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+ }
+ }
+ return object
+}
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index 882861129e..dfb31133c4 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -3,64 +3,62 @@ package getsvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"go.uber.org/zap"
)
-func (exec *execCtx) executeOnContainer() {
- if exec.isLocal() {
- exec.log.Debug("return result directly")
+func (r *request) executeOnContainer(ctx context.Context) {
+ if r.isLocal() {
+ r.log.Debug(ctx, logs.GetReturnResultDirectly)
return
}
- lookupDepth := exec.netmapLookupDepth()
+ lookupDepth := r.netmapLookupDepth()
- exec.log.Debug("trying to execute in container...",
+ r.log.Debug(ctx, logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- ok := exec.initEpoch()
+ ok := r.initEpoch(ctx)
if !ok {
return
}
- for {
- if exec.processCurrentEpoch() {
- break
- }
-
- // check the maximum depth has been reached
- if lookupDepth == 0 {
- break
- }
+ localStatus := r.status
+ for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 {
lookupDepth--
// go to the previous epoch
- exec.curProcEpoch--
+ r.curProcEpoch--
}
}
-func (exec *execCtx) processCurrentEpoch() bool {
- exec.log.Debug("process epoch",
- zap.Uint64("number", exec.curProcEpoch),
+func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
+ r.log.Debug(ctx, logs.ProcessEpoch,
+ zap.Uint64("number", r.curProcEpoch),
)
- traverser, ok := exec.generateTraverser(exec.address())
+ traverser, ok := r.generateTraverser(ctx, r.address())
if !ok {
return true
}
- ctx, cancel := context.WithCancel(exec.context())
+ ctx, cancel := context.WithCancel(ctx)
defer cancel()
- exec.status = statusUndefined
+ if localStatus == statusEC { // possible only for raw == true and local == false
+ r.status = statusEC
+ } else {
+ r.status = statusUndefined
+ }
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug("no more nodes, abort placement iteration")
+ r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
return false
}
@@ -68,8 +66,8 @@ func (exec *execCtx) processCurrentEpoch() bool {
for i := range addrs {
select {
case <-ctx.Done():
- exec.log.Debug("interrupt placement iteration by context",
- zap.String("error", ctx.Err().Error()),
+ r.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
+ zap.Error(ctx.Err()),
)
return true
@@ -83,8 +81,8 @@ func (exec *execCtx) processCurrentEpoch() bool {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- if exec.processNode(ctx, info) {
- exec.log.Debug("completing the operation")
+ if r.processNode(ctx, info) {
+ r.log.Debug(ctx, logs.GetCompletingTheOperation)
return true
}
}
diff --git a/pkg/services/object/get/errors.go b/pkg/services/object/get/errors.go
new file mode 100644
index 0000000000..6ea16a144f
--- /dev/null
+++ b/pkg/services/object/get/errors.go
@@ -0,0 +1,10 @@
+package getsvc
+
+import "errors"
+
+var (
+ errRangeZeroLength = errors.New("zero range length")
+ errRangeOverflow = errors.New("range overflow")
+ errChildWithEmptyParent = errors.New("received child with empty parent")
+ errParentAddressDiffers = errors.New("parent address in child object differs")
+)
diff --git a/pkg/services/object/get/exec.go b/pkg/services/object/get/exec.go
deleted file mode 100644
index 09ff9a82f9..0000000000
--- a/pkg/services/object/get/exec.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package getsvc
-
-import (
- "context"
- "crypto/ecdsa"
-
- clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-type statusError struct {
- status int
- err error
-}
-
-type execCtx struct {
- svc *Service
-
- ctx context.Context
-
- prm RangePrm
-
- statusError
-
- infoSplit *objectSDK.SplitInfo
-
- log *logger.Logger
-
- collectedObject *objectSDK.Object
-
- head bool
-
- curProcEpoch uint64
-}
-
-type execOption func(*execCtx)
-
-const (
- statusUndefined int = iota
- statusOK
- statusINHUMED
- statusVIRTUAL
- statusOutOfRange
-)
-
-func headOnly() execOption {
- return func(c *execCtx) {
- c.head = true
- }
-}
-
-func withPayloadRange(r *objectSDK.Range) execOption {
- return func(c *execCtx) {
- c.prm.rng = r
- }
-}
-
-func (exec *execCtx) setLogger(l *logger.Logger) {
- req := "GET"
- if exec.headOnly() {
- req = "HEAD"
- } else if exec.ctxRange() != nil {
- req = "GET_RANGE"
- }
-
- exec.log = &logger.Logger{Logger: l.With(
- zap.String("request", req),
- zap.Stringer("address", exec.address()),
- zap.Bool("raw", exec.isRaw()),
- zap.Bool("local", exec.isLocal()),
- zap.Bool("with session", exec.prm.common.SessionToken() != nil),
- zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )}
-}
-
-func (exec execCtx) context() context.Context {
- return exec.ctx
-}
-
-func (exec execCtx) isLocal() bool {
- return exec.prm.common.LocalOnly()
-}
-
-func (exec execCtx) isRaw() bool {
- return exec.prm.raw
-}
-
-func (exec execCtx) address() oid.Address {
- return exec.prm.addr
-}
-
-func (exec execCtx) key() (*ecdsa.PrivateKey, error) {
- if exec.prm.signerKey != nil {
- // the key has already been requested and
- // cached in the previous operations
- return exec.prm.signerKey, nil
- }
-
- var sessionInfo *util.SessionInfo
-
- if tok := exec.prm.common.SessionToken(); tok != nil {
- sessionInfo = &util.SessionInfo{
- ID: tok.ID(),
- Owner: tok.Issuer(),
- }
- }
-
- return exec.svc.keyStore.GetKey(sessionInfo)
-}
-
-func (exec *execCtx) canAssemble() bool {
- return !exec.isRaw() && !exec.headOnly()
-}
-
-func (exec *execCtx) splitInfo() *objectSDK.SplitInfo {
- return exec.infoSplit
-}
-
-func (exec *execCtx) containerID() cid.ID {
- return exec.address().Container()
-}
-
-func (exec *execCtx) ctxRange() *objectSDK.Range {
- return exec.prm.rng
-}
-
-func (exec *execCtx) headOnly() bool {
- return exec.head
-}
-
-func (exec *execCtx) netmapEpoch() uint64 {
- return exec.prm.common.NetmapEpoch()
-}
-
-func (exec *execCtx) netmapLookupDepth() uint64 {
- return exec.prm.common.NetmapLookupDepth()
-}
-
-func (exec *execCtx) initEpoch() bool {
- exec.curProcEpoch = exec.netmapEpoch()
- if exec.curProcEpoch > 0 {
- return true
- }
-
- e, err := exec.svc.currentEpochReceiver.currentEpoch()
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not get current epoch number",
- zap.String("error", err.Error()),
- )
-
- return false
- case err == nil:
- exec.curProcEpoch = e
- return true
- }
-}
-
-func (exec *execCtx) generateTraverser(addr oid.Address) (*placement.Traverser, bool) {
- obj := addr.Object()
-
- t, err := exec.svc.traverserGenerator.GenerateTraverser(addr.Container(), &obj, exec.curProcEpoch)
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not generate container traverser",
- zap.String("error", err.Error()),
- )
-
- return nil, false
- case err == nil:
- return t, true
- }
-}
-
-func (exec execCtx) remoteClient(info clientcore.NodeInfo) (getClient, bool) {
- c, err := exec.svc.clientCache.get(info)
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not construct remote node client")
- case err == nil:
- return c, true
- }
-
- return nil, false
-}
-
-func mergeSplitInfo(dst, src *objectSDK.SplitInfo) {
- if last, ok := src.LastPart(); ok {
- dst.SetLastPart(last)
- }
-
- if link, ok := src.Link(); ok {
- dst.SetLink(link)
- }
-
- if splitID := src.SplitID(); splitID != nil {
- dst.SetSplitID(splitID)
- }
-}
-
-func (exec *execCtx) writeCollectedHeader() bool {
- if exec.ctxRange() != nil {
- return true
- }
-
- err := exec.prm.objWriter.WriteHeader(
- exec.context(),
- exec.collectedObject.CutPayload(),
- )
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not write header",
- zap.String("error", err.Error()),
- )
- case err == nil:
- exec.status = statusOK
- exec.err = nil
- }
-
- return exec.status == statusOK
-}
-
-func (exec *execCtx) writeObjectPayload(obj *objectSDK.Object) bool {
- if exec.headOnly() {
- return true
- }
-
- err := exec.prm.objWriter.WriteChunk(exec.context(), obj.Payload())
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not write payload chunk",
- zap.String("error", err.Error()),
- )
- case err == nil:
- exec.status = statusOK
- exec.err = nil
- }
-
- return err == nil
-}
-
-func (exec *execCtx) writeCollectedObject() {
- if ok := exec.writeCollectedHeader(); ok {
- exec.writeObjectPayload(exec.collectedObject)
- }
-}
-
-// isForwardingEnabled returns true if common execution
-// parameters has request forwarding closure set.
-func (exec execCtx) isForwardingEnabled() bool {
- return exec.prm.forwarder != nil
-}
-
-// disableForwarding removes request forwarding closure from common
-// parameters, so it won't be inherited in new execution contexts.
-func (exec *execCtx) disableForwarding() {
- exec.prm.SetRequestForwarder(nil)
-}
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index f3d5f8a5f8..3a50308c22 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -2,24 +2,28 @@ package getsvc
import (
"context"
+ "errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
// Get serves a request to get an object by address, and returns Streamer instance.
func (s *Service) Get(ctx context.Context, prm Prm) error {
- return s.get(ctx, prm.commonPrm).err
+ return s.get(ctx, RequestParameters{
+ commonPrm: prm.commonPrm,
+ })
}
// GetRange serves a request to get an object by address, and returns Streamer instance.
func (s *Service) GetRange(ctx context.Context, prm RangePrm) error {
- return s.getRange(ctx, prm)
-}
-
-func (s *Service) getRange(ctx context.Context, prm RangePrm, opts ...execOption) error {
- return s.get(ctx, prm.commonPrm, append(opts, withPayloadRange(prm.rng))...).err
+ return s.get(ctx, RequestParameters{
+ commonPrm: prm.commonPrm,
+ rng: prm.rng,
+ })
}
func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHashRes, error) {
@@ -33,16 +37,15 @@ func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHas
// 1. Potential gains are insignificant when operating in the Internet given typical latencies and losses.
// 2. Parallel solution is more complex in terms of code.
// 3. TZ-hash is likely to be disabled in private installations.
- rngPrm := RangePrm{
+ reqPrm := RequestParameters{
commonPrm: prm.commonPrm,
+ rng: &rng,
}
-
- rngPrm.SetRange(&rng)
- rngPrm.SetChunkWriter(&hasherWrapper{
+ reqPrm.SetChunkWriter(&hasherWrapper{
hash: util.NewSaltingWriter(h, prm.salt),
})
- if err := s.getRange(ctx, rngPrm); err != nil {
+ if err := s.get(ctx, reqPrm); err != nil {
return nil, err
}
@@ -59,59 +62,76 @@ func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHas
// Returns ErrNotFound if the header was not received for the call.
// Returns SplitInfoError if object is virtual and raw flag is set.
func (s *Service) Head(ctx context.Context, prm HeadPrm) error {
- return s.get(ctx, prm.commonPrm, headOnly()).err
+ return s.get(ctx, RequestParameters{
+ head: true,
+ commonPrm: prm.commonPrm,
+ })
}
-func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) statusError {
- exec := &execCtx{
- svc: s,
- ctx: ctx,
- prm: RangePrm{
- commonPrm: prm,
- },
- infoSplit: object.NewSplitInfo(),
- }
+func (s *Service) get(ctx context.Context, prm RequestParameters) error {
+ exec := &request{
+ keyStore: s.keyStore,
+ traverserGenerator: s.traverserGenerator,
+ remoteStorageConstructor: s.remoteStorageConstructor,
+ epochSource: s.epochSource,
+ localStorage: s.localStorage,
+ containerSource: s.containerSource,
- for i := range opts {
- opts[i](exec)
+ prm: prm,
+ infoSplit: objectSDK.NewSplitInfo(),
+ infoEC: newECInfo(),
+ log: s.log,
}
exec.setLogger(s.log)
- exec.execute()
+ exec.execute(ctx)
- return exec.statusError
+ return exec.err
}
-func (exec *execCtx) execute() {
- exec.log.Debug("serving request...")
+func (r *request) execute(ctx context.Context) {
+ r.log.Debug(ctx, logs.ServingRequest)
// perform local operation
- exec.executeLocal()
+ r.executeLocal(ctx)
- exec.analyzeStatus(true)
+ r.analyzeStatus(ctx, true)
}
-func (exec *execCtx) analyzeStatus(execCnr bool) {
+func (r *request) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
- switch exec.status {
+ switch r.status {
case statusOK:
- exec.log.Debug("operation finished successfully")
+ r.log.Debug(ctx, logs.OperationFinishedSuccessfully)
case statusINHUMED:
- exec.log.Debug("requested object was marked as removed")
+ r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
- exec.log.Debug("requested object is virtual")
- exec.assemble()
+ r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual)
+ r.assemble(ctx)
case statusOutOfRange:
- exec.log.Debug("requested range is out of object bounds")
+ r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds)
+ case statusEC:
+ r.log.Debug(ctx, logs.GetRequestedObjectIsEC)
+ if r.isRaw() && execCnr {
+ r.executeOnContainer(ctx)
+ r.analyzeStatus(ctx, false)
+ }
+ r.assembleEC(ctx)
default:
- exec.log.Debug("operation finished with error",
- zap.String("error", exec.err.Error()),
+ r.log.Debug(ctx, logs.OperationFinishedWithError,
+ zap.Error(r.err),
)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ if execCnr && errors.As(r.err, &errAccessDenied) {
+ // A local get cannot return an access denied error, so this error must have
+ // come from a write to the output stream; there is no need to look for the
+ // object on other nodes.
+ return
+ }
if execCnr {
- exec.executeOnContainer()
- exec.analyzeStatus(false)
+ r.executeOnContainer(ctx)
+ r.analyzeStatus(ctx, false)
}
}
}
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 09d0c38faa..3efc72065a 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -1,7 +1,9 @@
package getsvc
import (
+ "bytes"
"context"
+ "crypto/ecdsa"
"crypto/rand"
"errors"
"fmt"
@@ -9,12 +11,14 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -23,6 +27,9 @@ import (
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
@@ -56,10 +63,14 @@ type testClient struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) currentEpoch() (uint64, error) {
+func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
return uint64(e), nil
}
+func (e testEpochReceiver) CurrentEpoch() uint64 {
+ return uint64(e)
+}
+
func newTestStorage() *testStorage {
return &testStorage{
inhumed: make(map[string]struct{}),
@@ -68,7 +79,7 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, error) {
+func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
opts := make([]placement.Option, 0, 4)
opts = append(opts,
placement.ForContainer(g.c),
@@ -80,10 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e ui
opts = append(opts, placement.ForObject(*obj))
}
- return placement.NewTraverser(opts...)
+ t, err := placement.NewTraverser(context.Background(), opts...)
+ return t, &containerCore.Container{
+ Value: g.c,
+ }, err
}
-func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -99,7 +113,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.
return vs, nil
}
-func (c *testClientCache) get(info client.NodeInfo) (getClient, error) {
+func (c *testClientCache) Get(info client.NodeInfo) (remoteStorage, error) {
v, ok := c.clients[network.StringifyGroup(info.AddressGroup())]
if !ok {
return nil, errors.New("could not construct client")
@@ -117,21 +131,6 @@ func newTestClient() *testClient {
}
}
-func (c *testClient) getObject(exec *execCtx, _ client.NodeInfo) (*objectSDK.Object, error) {
- v, ok := c.results[exec.address().EncodeToString()]
- if !ok {
- var errNotFound apistatus.ObjectNotFound
-
- return nil, errNotFound
- }
-
- if v.err != nil {
- return nil, v.err
- }
-
- return cutToRange(v.obj, exec.ctxRange()), nil
-}
-
func (c *testClient) addResult(addr oid.Address, obj *objectSDK.Object, err error) {
c.results[addr.EncodeToString()] = struct {
obj *objectSDK.Object
@@ -139,17 +138,52 @@ func (c *testClient) addResult(addr oid.Address, obj *objectSDK.Object, err erro
}{obj: obj, err: err}
}
-func (s *testStorage) get(exec *execCtx) (*objectSDK.Object, error) {
+func (c *testClient) Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) {
+ v, ok := c.results[address.EncodeToString()]
+ if !ok {
+ return nil, new(apistatus.ObjectNotFound)
+ }
+
+ if v.err != nil {
+ return nil, v.err
+ }
+
+ return v.obj, nil
+}
+
+func (c *testClient) Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) {
+ return c.Get(ctx, address, requestParams)
+}
+
+func (c *testClient) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) {
+ obj, err := c.Get(ctx, address, requestParams)
+ if err != nil {
+ return nil, err
+ }
+ return cutToRange(obj, rng), nil
+}
+
+func (c *testClient) ForwardRequest(ctx context.Context, info client.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error) {
+ return nil, fmt.Errorf("not implemented")
+}
+
+func (s *testStorage) Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) {
+ return s.Range(ctx, address, nil)
+}
+
+func (s *testStorage) Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) {
+ return s.Range(ctx, address, nil)
+}
+
+func (s *testStorage) Range(_ context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error) {
var (
ok bool
obj *objectSDK.Object
- sAddr = exec.address().EncodeToString()
+ sAddr = address.EncodeToString()
)
if _, ok = s.inhumed[sAddr]; ok {
- var errRemoved apistatus.ObjectAlreadyRemoved
-
- return nil, errRemoved
+ return nil, new(apistatus.ObjectAlreadyRemoved)
}
if info, ok := s.virtual[sAddr]; ok {
@@ -157,12 +191,10 @@ func (s *testStorage) get(exec *execCtx) (*objectSDK.Object, error) {
}
if obj, ok = s.phy[sAddr]; ok {
- return cutToRange(obj, exec.ctxRange()), nil
+ return cutToRange(obj, rng), nil
}
- var errNotFound apistatus.ObjectNotFound
-
- return nil, errNotFound
+ return nil, new(apistatus.ObjectNotFound)
}
func cutToRange(o *objectSDK.Object, rng *objectSDK.Range) *objectSDK.Object {
@@ -213,8 +245,7 @@ func (whe *writeHeaderError) Error() string {
return "write header error"
}
-type writeHeaderErrorObjectWriter struct {
-}
+type writeHeaderErrorObjectWriter struct{}
func (w *writeHeaderErrorObjectWriter) WriteHeader(_ context.Context, _ *objectSDK.Object) error {
return &writeHeaderError{}
@@ -230,8 +261,7 @@ func (whe *writePayloadError) Error() string {
return "write payload error"
}
-type writePayloadErrorObjectWriter struct {
-}
+type writePayloadErrorObjectWriter struct{}
func (w *writePayloadErrorObjectWriter) WriteHeader(_ context.Context, _ *objectSDK.Object) error {
return nil
@@ -241,15 +271,20 @@ func (w *writePayloadErrorObjectWriter) WriteChunk(_ context.Context, _ []byte)
return &writePayloadError{}
}
+type testKeyStorage struct{}
+
+func (ks *testKeyStorage) GetKey(_ *util.SessionInfo) (*ecdsa.PrivateKey, error) {
+ return &ecdsa.PrivateKey{}, nil
+}
+
func TestGetLocalOnly(t *testing.T) {
ctx := context.Background()
newSvc := func(storage *testStorage) *Service {
- svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(false)
- svc.localStorage = storage
-
- return svc
+ return &Service{
+ log: test.NewLogger(t),
+ localStorage: storage,
+ }
}
newPrm := func(raw bool, w ObjectWriter) Prm {
@@ -343,19 +378,19 @@ func TestGetLocalOnly(t *testing.T) {
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err))
rngPrm := newRngPrm(false, nil, 0, 0)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err))
headPrm := newHeadPrm(false, nil)
headPrm.WithAddress(addr)
err = svc.Head(ctx, headPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
+ require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err))
})
t.Run("404", func(t *testing.T) {
@@ -370,20 +405,20 @@ func TestGetLocalOnly(t *testing.T) {
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
rngPrm := newRngPrm(false, nil, 0, 0)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
headPrm := newHeadPrm(false, nil)
headPrm.WithAddress(addr)
err = svc.Head(ctx, headPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
})
t.Run("VIRTUAL", func(t *testing.T) {
@@ -435,7 +470,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := 0; j < dim[i]; j++ {
+ for j := range dim[i] {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
@@ -443,6 +478,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
var ni netmap.NodeInfo
ni.SetNetworkEndpoints(a)
+ ni.SetPublicKey([]byte(a))
var na network.AddressGroup
@@ -472,7 +508,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) {
ids := make([]oid.ID, 0, ln)
payload := make([]byte, 0, ln*10)
- for i := 0; i < ln; i++ {
+ for i := range ln {
ids = append(ids, curID)
addr.SetObject(curID)
@@ -506,22 +542,21 @@ func TestGetRemoteSmall(t *testing.T) {
container.CalculateID(&idCnr, cnr)
newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
- svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(false)
- svc.localStorage = newTestStorage()
-
const curEpoch = 13
- svc.traverserGenerator = &testTraverserGenerator{
- c: cnr,
- b: map[uint64]placement.Builder{
- curEpoch: b,
+ return &Service{
+ log: test.NewLogger(t),
+ localStorage: newTestStorage(),
+ traverserGenerator: &testTraverserGenerator{
+ c: cnr,
+ b: map[uint64]placement.Builder{
+ curEpoch: b,
+ },
},
+ epochSource: testEpochReceiver(curEpoch),
+ remoteStorageConstructor: c,
+ keyStore: &testKeyStorage{},
}
- svc.clientCache = c
- svc.currentEpochReceiver = testEpochReceiver(curEpoch)
-
- return svc
}
newPrm := func(raw bool, w ObjectWriter) Prm {
@@ -533,21 +568,6 @@ func TestGetRemoteSmall(t *testing.T) {
return p
}
- newRngPrm := func(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
- p := RangePrm{}
- p.SetChunkWriter(w)
- p.WithRawFlag(raw)
- p.common = new(util.CommonPrm).WithLocalOnly(false)
-
- r := objectSDK.NewRange()
- r.SetOffset(off)
- r.SetLength(ln)
-
- p.SetRange(r)
-
- return p
- }
-
newHeadPrm := func(raw bool, w ObjectWriter) HeadPrm {
p := HeadPrm{}
p.SetHeaderWriter(w)
@@ -649,19 +669,19 @@ func TestGetRemoteSmall(t *testing.T) {
p.WithAddress(addr)
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+ require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err))
rngPrm := newRngPrm(false, nil, 0, 0)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+ require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err))
headPrm := newHeadPrm(false, nil)
headPrm.WithAddress(addr)
err = svc.Head(ctx, headPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+ require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err))
})
t.Run("404", func(t *testing.T) {
@@ -693,24 +713,24 @@ func TestGetRemoteSmall(t *testing.T) {
p.WithAddress(addr)
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
rngPrm := newRngPrm(false, nil, 0, 0)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
headPrm := newHeadPrm(false, nil)
headPrm.WithAddress(addr)
err = svc.Head(ctx, headPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
})
t.Run("VIRTUAL", func(t *testing.T) {
testHeadVirtual := func(svc *Service, addr oid.Address, i *objectSDK.SplitInfo) {
- headPrm := newHeadPrm(false, nil)
+ headPrm := newHeadPrm(true, nil)
headPrm.WithAddress(addr)
errSplit := objectSDK.NewSplitInfoError(objectSDK.NewSplitInfo())
@@ -738,11 +758,11 @@ func TestGetRemoteSmall(t *testing.T) {
c1 := newTestClient()
c1.addResult(addr, nil, errors.New("any error"))
- c1.addResult(splitAddr, nil, apistatus.ObjectNotFound{})
+ c1.addResult(splitAddr, nil, new(apistatus.ObjectNotFound))
c2 := newTestClient()
c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo))
- c2.addResult(splitAddr, nil, apistatus.ObjectNotFound{})
+ c2.addResult(splitAddr, nil, new(apistatus.ObjectNotFound))
builder := &testPlacementBuilder{
vectors: map[string][][]netmap.NodeInfo{
@@ -764,13 +784,13 @@ func TestGetRemoteSmall(t *testing.T) {
p.WithAddress(addr)
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
rngPrm := newRngPrm(false, nil, 0, 0)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
})
t.Run("get chain element failure", func(t *testing.T) {
@@ -815,7 +835,7 @@ func TestGetRemoteSmall(t *testing.T) {
c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo))
c2.addResult(linkAddr, linkingObj, nil)
c2.addResult(child1Addr, children[0], nil)
- c2.addResult(child2Addr, nil, apistatus.ObjectNotFound{})
+ c2.addResult(child2Addr, nil, new(apistatus.ObjectNotFound))
builder := &testPlacementBuilder{
vectors: map[string][][]netmap.NodeInfo{
@@ -839,13 +859,13 @@ func TestGetRemoteSmall(t *testing.T) {
p.WithAddress(addr)
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
rngPrm := newRngPrm(false, NewSimpleObjectWriter(), 0, 1)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
})
t.Run("OK", func(t *testing.T) {
@@ -1176,7 +1196,7 @@ func TestGetRemoteSmall(t *testing.T) {
err := svc.Get(ctx, p)
require.Error(t, err)
- require.Equal(t, err.Error(), "received child with empty parent")
+ require.ErrorIs(t, err, errChildWithEmptyParent)
w = NewSimpleObjectWriter()
payloadSz := srcObj.PayloadSize()
@@ -1189,7 +1209,7 @@ func TestGetRemoteSmall(t *testing.T) {
err = svc.GetRange(ctx, rngPrm)
require.Error(t, err)
- require.Equal(t, err.Error(), "received child with empty parent")
+ require.ErrorIs(t, err, errChildWithEmptyParent)
})
t.Run("out of range", func(t *testing.T) {
@@ -1264,7 +1284,6 @@ func TestGetRemoteSmall(t *testing.T) {
err := svc.GetRange(ctx, p)
require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange))
})
-
})
t.Run("right child", func(t *testing.T) {
@@ -1285,11 +1304,11 @@ func TestGetRemoteSmall(t *testing.T) {
c1 := newTestClient()
c1.addResult(addr, nil, errors.New("any error"))
- c1.addResult(splitAddr, nil, apistatus.ObjectNotFound{})
+ c1.addResult(splitAddr, nil, new(apistatus.ObjectNotFound))
c2 := newTestClient()
c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo))
- c2.addResult(splitAddr, nil, apistatus.ObjectNotFound{})
+ c2.addResult(splitAddr, nil, new(apistatus.ObjectNotFound))
builder := &testPlacementBuilder{
vectors: map[string][][]netmap.NodeInfo{
@@ -1311,13 +1330,13 @@ func TestGetRemoteSmall(t *testing.T) {
p.WithAddress(addr)
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
rngPrm := newRngPrm(false, nil, 0, 0)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
})
t.Run("get chain element failure", func(t *testing.T) {
@@ -1373,19 +1392,19 @@ func TestGetRemoteSmall(t *testing.T) {
testHeadVirtual(svc, addr, splitInfo)
headSvc := newTestClient()
- headSvc.addResult(preRightAddr, nil, apistatus.ObjectNotFound{})
+ headSvc.addResult(preRightAddr, nil, new(apistatus.ObjectNotFound))
p := newPrm(false, NewSimpleObjectWriter())
p.WithAddress(addr)
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
rngPrm := newRngPrm(false, nil, 0, 1)
rngPrm.WithAddress(addr)
err = svc.GetRange(ctx, rngPrm)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
})
t.Run("child has different parent", func(t *testing.T) {
@@ -1607,6 +1626,203 @@ func TestGetRemoteSmall(t *testing.T) {
})
}
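+// testTarget is a transformer.ObjectWriter that records every written object.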
+type testTarget struct {
+ objects []*objectSDK.Object
+}
+
+func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error {
+ tt.objects = append(tt.objects, obj)
+ return nil
+}
+
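+// objectChain writes a payload of totalSize through the payload size limiter
+// and returns the parent ID, the chain of child objects, the linking object
+// (nil for small objects) and the raw payload.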
+func objectChain(t *testing.T, cnr cid.ID, singleSize, totalSize uint64) (oid.ID, []*objectSDK.Object, *objectSDK.Object, []byte) {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ tt := new(testTarget)
+ p := transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: &pk.PrivateKey,
+ NextTargetInit: func() transformer.ObjectWriter { return tt },
+ NetworkState: testEpochReceiver(1),
+ MaxSize: singleSize,
+ })
+
+ payload := make([]byte, totalSize)
+ _, err = rand.Read(payload)
+ require.NoError(t, err)
+
+ ver := version.Current()
+ hdr := objectSDK.New()
+ hdr.SetContainerID(cnr)
+ hdr.SetType(objectSDK.TypeRegular)
+ hdr.SetVersion(&ver)
+
+ ctx := context.Background()
+ require.NoError(t, p.WriteHeader(ctx, hdr))
+
+ _, err = p.Write(ctx, payload)
+ require.NoError(t, err)
+
+ res, err := p.Close(ctx)
+ require.NoError(t, err)
+
+ if totalSize <= singleSize {
+ // Small object, no linking.
+ require.Len(t, tt.objects, 1)
+ return res.SelfID, tt.objects, nil, payload
+ }
+
+ return *res.ParentID, tt.objects[:len(tt.objects)-1], tt.objects[len(tt.objects)-1], bytes.Clone(payload)
+}
+
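+// newRngPrm builds range request parameters with the given raw flag, chunk
+// writer and payload range.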
+func newRngPrm(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
+ p := RangePrm{}
+ p.SetChunkWriter(w)
+ p.WithRawFlag(raw)
+ p.common = new(util.CommonPrm)
+
+ r := objectSDK.NewRange()
+ r.SetOffset(off)
+ r.SetLength(ln)
+
+ p.SetRange(r)
+ return p
+}
+
+func TestGetRange(t *testing.T) {
+ var cnr container.Container
+ cnr.SetPlacementPolicy(netmaptest.PlacementPolicy())
+
+ var idCnr cid.ID
+ container.CalculateID(&idCnr, cnr)
+
+ ns, as := testNodeMatrix(t, []int{2})
+
+ testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) {
+ w := NewSimpleObjectWriter()
+ rngPrm := newRngPrm(false, w, from, to-from)
+ rngPrm.WithAddress(addr)
+
+ err := svc.GetRange(context.Background(), rngPrm)
+ require.NoError(t, err)
+ if from == to {
+ require.Nil(t, w.Object().Payload())
+ } else {
+ require.Equal(t, payload[from:to], w.Object().Payload())
+ }
+ }
+
+ newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
+ const curEpoch = 13
+
+ return &Service{
+ log: test.NewLogger(t),
+ localStorage: newTestStorage(),
+ traverserGenerator: &testTraverserGenerator{
+ c: cnr,
+ b: map[uint64]placement.Builder{
+ curEpoch: b,
+ },
+ },
+ epochSource: testEpochReceiver(curEpoch),
+ remoteStorageConstructor: c,
+ keyStore: &testKeyStorage{},
+ }
+ }
+
+ t.Run("small", func(t *testing.T) {
+ const totalSize = 5
+ _, objs, _, payload := objectChain(t, idCnr, totalSize, totalSize)
+ require.Len(t, objs, 1)
+ require.Len(t, payload, totalSize)
+
+ obj := objs[0]
+ addr := object.AddressOf(obj)
+ builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{addr.EncodeToString(): ns}}
+
+ c1 := newTestClient()
+ c1.addResult(addr, obj, nil)
+
+ svc := newSvc(builder, &testClientCache{
+ clients: map[string]*testClient{
+ as[0][0]: c1,
+ as[0][1]: c1,
+ },
+ })
+
+ for from := range totalSize - 1 {
+ for to := from; to < totalSize; to++ {
+ t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
+ testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
+ })
+ }
+ }
+ })
+ t.Run("big", func(t *testing.T) {
+ const totalSize = 9
+ id, objs, link, payload := objectChain(t, idCnr, 3, totalSize) // 3 parts
+ require.Equal(t, totalSize, len(payload))
+
+ builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{}}
+ builder.vectors[idCnr.EncodeToString()+"/"+id.EncodeToString()] = ns
+ builder.vectors[object.AddressOf(link).EncodeToString()] = ns
+ for i := range objs {
+ builder.vectors[object.AddressOf(objs[i]).EncodeToString()] = ns
+ }
+
+ var addr oid.Address
+ addr.SetContainer(idCnr)
+ addr.SetObject(id)
+
+ const (
+ linkingLast = "splitinfo=last"
+ linkingChildren = "splitinfo=children"
+ linkingBoth = "splitinfo=both"
+ )
+
+ lastID, _ := objs[len(objs)-1].ID()
+ linkID, _ := link.ID()
+
+ for _, kind := range []string{linkingLast, linkingChildren, linkingBoth} {
+ t.Run(kind, func(t *testing.T) {
+ c1 := newTestClient()
+ for i := range objs {
+ c1.addResult(object.AddressOf(objs[i]), objs[i], nil)
+ }
+
+ c1.addResult(object.AddressOf(link), link, nil)
+
+ si := objectSDK.NewSplitInfo()
+ switch kind {
+ case linkingLast:
+ si.SetLastPart(lastID)
+ case linkingChildren:
+ si.SetLink(linkID)
+ case linkingBoth:
+ si.SetLastPart(lastID)
+ si.SetLink(linkID)
+ }
+ c1.addResult(addr, nil, objectSDK.NewSplitInfoError(si))
+
+ svc := newSvc(builder, &testClientCache{
+ clients: map[string]*testClient{
+ as[0][0]: c1,
+ as[0][1]: c1,
+ },
+ })
+
+ for from := range totalSize - 1 {
+ for to := from; to < totalSize; to++ {
+ t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
+ testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
+ })
+ }
+ }
+ })
+ }
+ })
+}
+
func TestGetFromPastEpoch(t *testing.T) {
ctx := context.Background()
@@ -1639,39 +1855,38 @@ func TestGetFromPastEpoch(t *testing.T) {
c22 := newTestClient()
c22.addResult(addr, obj, nil)
- svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(false)
- svc.localStorage = newTestStorage()
-
const curEpoch = 13
- svc.traverserGenerator = &testTraverserGenerator{
- c: cnr,
- b: map[uint64]placement.Builder{
- curEpoch: &testPlacementBuilder{
- vectors: map[string][][]netmap.NodeInfo{
- addr.EncodeToString(): ns[:1],
+ svc := &Service{
+ log: test.NewLogger(t),
+ localStorage: newTestStorage(),
+ epochSource: testEpochReceiver(curEpoch),
+ traverserGenerator: &testTraverserGenerator{
+ c: cnr,
+ b: map[uint64]placement.Builder{
+ curEpoch: &testPlacementBuilder{
+ vectors: map[string][][]netmap.NodeInfo{
+ addr.EncodeToString(): ns[:1],
+ },
},
- },
- curEpoch - 1: &testPlacementBuilder{
- vectors: map[string][][]netmap.NodeInfo{
- addr.EncodeToString(): ns[1:],
+ curEpoch - 1: &testPlacementBuilder{
+ vectors: map[string][][]netmap.NodeInfo{
+ addr.EncodeToString(): ns[1:],
+ },
},
},
},
- }
-
- svc.clientCache = &testClientCache{
- clients: map[string]*testClient{
- as[0][0]: c11,
- as[0][1]: c12,
- as[1][0]: c21,
- as[1][1]: c22,
+ remoteStorageConstructor: &testClientCache{
+ clients: map[string]*testClient{
+ as[0][0]: c11,
+ as[0][1]: c12,
+ as[1][0]: c21,
+ as[1][1]: c22,
+ },
},
+ keyStore: &testKeyStorage{},
}
- svc.currentEpochReceiver = testEpochReceiver(curEpoch)
-
w := NewSimpleObjectWriter()
commonPrm := new(util.CommonPrm)
@@ -1682,7 +1897,7 @@ func TestGetFromPastEpoch(t *testing.T) {
p.WithAddress(addr)
err := svc.Get(ctx, p)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
commonPrm.SetNetmapLookupDepth(1)
@@ -1705,7 +1920,7 @@ func TestGetFromPastEpoch(t *testing.T) {
rp.SetRange(r)
err = svc.GetRange(ctx, rp)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
w = NewSimpleObjectWriter()
rp.SetChunkWriter(w)
@@ -1722,7 +1937,7 @@ func TestGetFromPastEpoch(t *testing.T) {
hp.WithAddress(addr)
err = svc.Head(ctx, hp)
- require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
+ require.True(t, clientSDK.IsErrObjectNotFound(err))
w = NewSimpleObjectWriter()
hp.SetHeaderWriter(w)
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
new file mode 100644
index 0000000000..83ef547449
--- /dev/null
+++ b/pkg/services/object/get/getrangeec_test.go
@@ -0,0 +1,182 @@
+package getsvc
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "testing"
+
+ coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+type containerStorage struct {
+ cnt *container.Container
+}
+
+func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) {
+ coreCnt := coreContainer.Container{
+ Value: *cs.cnt,
+ }
+ return &coreCnt, nil
+}
+
+func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
+ return nil, nil
+}
+
+func TestGetRangeEC(t *testing.T) {
+ var dataCount uint32 = 3
+ var parityCount uint32 = 1
+ cnr := container.Container{}
+ p := netmap.PlacementPolicy{}
+ p.SetContainerBackupFactor(1)
+ x := netmap.ReplicaDescriptor{}
+ x.SetECDataCount(dataCount)
+ x.SetECParityCount(parityCount)
+ p.AddReplicas(x)
+ cnr.SetPlacementPolicy(p)
+
+ var idCnr cid.ID
+ container.CalculateID(&idCnr, cnr)
+
+ ns, as := testNodeMatrix(t, []int{4})
+
+ testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) {
+ w := NewSimpleObjectWriter()
+ rngPrm := newRngPrm(false, w, from, to-from)
+ rngPrm.WithAddress(addr)
+
+ err := svc.GetRange(context.Background(), rngPrm)
+ require.NoError(t, err)
+ if from == to {
+ require.Nil(t, w.Object().Payload())
+ } else {
+ require.Equal(t, payload[from:to], w.Object().Payload())
+ }
+ }
+
+ newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
+ const curEpoch = 13
+
+ return &Service{
+ log: test.NewLogger(t),
+ localStorage: newTestStorage(),
+ traverserGenerator: &testTraverserGenerator{
+ c: cnr,
+ b: map[uint64]placement.Builder{
+ curEpoch: b,
+ },
+ },
+ epochSource: testEpochReceiver(curEpoch),
+ remoteStorageConstructor: c,
+ keyStore: &testKeyStorage{},
+ containerSource: &containerStorage{
+ cnt: &cnr,
+ },
+ }
+ }
+ const totalSize = 5
+ obj, parts := objectECChain(t, &idCnr, &cnr, totalSize, totalSize)
+ require.Len(t, parts, int(dataCount+parityCount))
+ require.Len(t, obj.Payload(), totalSize)
+
+ addr := object.AddressOf(obj)
+ builder := &testPlacementBuilder{
+ vectors: map[string][][]netmap.NodeInfo{
+ addr.EncodeToString(): ns,
+ },
+ }
+
+ clients := map[string]*testClient{}
+ for i, part := range parts {
+ builder.vectors[object.AddressOf(part).EncodeToString()] = ns
+
+ tc := newTestClient()
+
+ ecInfo := objectSDK.NewECInfo()
+
+ chunk := objectSDK.ECChunk{}
+ chunk.Total = uint32(len(parts))
+ chunk.Index = uint32(i)
+ id, _ := part.ID()
+ idv2 := refs.ObjectID{}
+ id.WriteToV2(&idv2)
+ chunk.ID = idv2
+
+ ecInfo.AddChunk(chunk)
+ errECInfo := objectSDK.NewECInfoError(ecInfo)
+
+ tc.addResult(addr, nil, errECInfo)
+ tc.addResult(object.AddressOf(part), part, nil)
+
+ clients[as[0][i]] = tc
+ }
+
+ svc := newSvc(builder, &testClientCache{
+ clients: clients,
+ })
+
+ for from := range totalSize - 1 {
+ for to := from; to < totalSize; to++ {
+ t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
+ testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload())
+ })
+ }
+ }
+}
+
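+// objectECChain builds a single test object and splits it into EC parts
+// according to the container's placement policy.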
+func objectECChain(t *testing.T, cnrId *cid.ID, cnr *container.Container, singleSize, totalSize uint64) (*objectSDK.Object, []*objectSDK.Object) {
+ pk, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ tt := new(testTarget)
+ p := transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: &pk.PrivateKey,
+ NextTargetInit: func() transformer.ObjectWriter { return tt },
+ NetworkState: testEpochReceiver(1),
+ MaxSize: singleSize,
+ })
+
+ payload := make([]byte, totalSize)
+ _, err = rand.Read(payload)
+ require.NoError(t, err)
+
+ ver := version.Current()
+ hdr := objectSDK.New()
+ hdr.SetContainerID(*cnrId)
+ hdr.SetType(objectSDK.TypeRegular)
+ hdr.SetVersion(&ver)
+
+ ctx := context.Background()
+ require.NoError(t, p.WriteHeader(ctx, hdr))
+
+ _, err = p.Write(ctx, payload)
+ require.NoError(t, err)
+
+ _, err = p.Close(ctx)
+ require.NoError(t, err)
+
+ require.Len(t, tt.objects, 1)
+
+ c, err := erasurecode.NewConstructor(policy.ECDataCount(cnr.PlacementPolicy()), policy.ECParityCount(cnr.PlacementPolicy()))
+ require.NoError(t, err)
+ parts, err := c.Split(tt.objects[0], &pk.PrivateKey)
+ require.NoError(t, err)
+
+ return tt.objects[0], parts
+}
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index f526af4e66..cfabb082f2 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -1,43 +1,63 @@
package getsvc
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
-func (exec *execCtx) executeLocal() {
+func (r *request) executeLocal(ctx context.Context) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getService.executeLocal")
+ defer span.End()
+
var err error
- exec.collectedObject, err = exec.svc.localStorage.get(exec)
+ r.collectedObject, err = r.get(ctx)
var errSplitInfo *objectSDK.SplitInfoError
- var errRemoved apistatus.ObjectAlreadyRemoved
- var errOutOfRange apistatus.ObjectOutOfRange
+ var errECInfo *objectSDK.ECInfoError
+ var errRemoved *apistatus.ObjectAlreadyRemoved
+ var errOutOfRange *apistatus.ObjectOutOfRange
switch {
default:
- exec.status = statusUndefined
- exec.err = err
+ r.status = statusUndefined
+ r.err = err
- exec.log.Debug("local get failed",
- zap.String("error", err.Error()),
- )
+ r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err))
case err == nil:
- exec.status = statusOK
- exec.err = nil
- exec.writeCollectedObject()
+ r.status = statusOK
+ r.err = nil
+ r.writeCollectedObject(ctx)
case errors.As(err, &errRemoved):
- exec.status = statusINHUMED
- exec.err = errRemoved
+ r.status = statusINHUMED
+ r.err = errRemoved
case errors.As(err, &errSplitInfo):
- exec.status = statusVIRTUAL
- mergeSplitInfo(exec.splitInfo(), errSplitInfo.SplitInfo())
- exec.err = objectSDK.NewSplitInfoError(exec.infoSplit)
+ r.status = statusVIRTUAL
+ mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo())
+ r.err = objectSDK.NewSplitInfoError(r.infoSplit)
+ case errors.As(err, &errECInfo):
+ r.status = statusEC
+ r.err = r.infoEC.addLocal(errECInfo.ECInfo())
case errors.As(err, &errOutOfRange):
- exec.status = statusOutOfRange
- exec.err = errOutOfRange
+ r.status = statusOutOfRange
+ r.err = errOutOfRange
}
}
+
+func (r *request) get(ctx context.Context) (*objectSDK.Object, error) {
+ if r.headOnly() {
+ return r.localStorage.Head(ctx, r.address(), r.isRaw())
+ }
+ if rng := r.ctxRange(); rng != nil {
+ return r.localStorage.Range(ctx, r.address(), rng)
+ }
+ return r.localStorage.Get(ctx, r.address())
+}
diff --git a/pkg/services/object/get/prm.go b/pkg/services/object/get/prm.go
index 88848264e6..94c07381c5 100644
--- a/pkg/services/object/get/prm.go
+++ b/pkg/services/object/get/prm.go
@@ -3,12 +3,11 @@ package getsvc
import (
"context"
"crypto/ecdsa"
- "errors"
"hash"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -21,14 +20,9 @@ type Prm struct {
type RangePrm struct {
commonPrm
- rng *object.Range
+ rng *objectSDK.Range
}
-var (
- errRangeZeroLength = errors.New("zero range length")
- errRangeOverflow = errors.New("range overflow")
-)
-
// Validate pre-validates `OBJECTRANGE` request's parameters content
// without access to the requested object's payload.
func (p RangePrm) Validate() error {
@@ -54,12 +48,18 @@ type RangeHashPrm struct {
hashGen func() hash.Hash
- rngs []object.Range
+ rngs []objectSDK.Range
salt []byte
}
-type RequestForwarder func(coreclient.NodeInfo, coreclient.MultiAddressClient) (*object.Object, error)
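+// RequestParameters groups the parameters of a single object request
+// execution (Get, Head or GetRange).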
+type RequestParameters struct {
+ commonPrm
+ head bool
+ rng *objectSDK.Range
+}
+
+type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) (*objectSDK.Object, error)
// HeadPrm groups parameters of Head service call.
type HeadPrm struct {
@@ -83,43 +83,25 @@ type commonPrm struct {
signerKey *ecdsa.PrivateKey
}
-// ChunkWriter is an interface of target component
-// to write payload chunk.
-type ChunkWriter interface {
- WriteChunk(context.Context, []byte) error
-}
-
-// HeaderWriter is an interface of target component
-// to write object header.
-type HeaderWriter interface {
- WriteHeader(context.Context, *object.Object) error
-}
-
-// ObjectWriter is an interface of target component to write object.
-type ObjectWriter interface {
- HeaderWriter
- ChunkWriter
-}
-
// SetObjectWriter sets target component to write the object.
func (p *Prm) SetObjectWriter(w ObjectWriter) {
p.objWriter = w
}
// SetChunkWriter sets target component to write the object payload range.
-func (p *RangePrm) SetChunkWriter(w ChunkWriter) {
+func (p *commonPrm) SetChunkWriter(w ChunkWriter) {
p.objWriter = &partWriter{
chunkWriter: w,
}
}
// SetRange sets range of the requested payload data.
-func (p *RangePrm) SetRange(rng *object.Range) {
+func (p *RangePrm) SetRange(rng *objectSDK.Range) {
p.rng = rng
}
// SetRangeList sets a list of object payload ranges.
-func (p *RangeHashPrm) SetRangeList(rngs []object.Range) {
+func (p *RangeHashPrm) SetRangeList(rngs []objectSDK.Range) {
p.rngs = rngs
}
@@ -142,6 +124,10 @@ func (p *commonPrm) SetRequestForwarder(f RequestForwarder) {
p.forwarder = f
}
+func (p *commonPrm) SetSignerKey(signerKey *ecdsa.PrivateKey) {
+ p.signerKey = signerKey
+}
+
// WithAddress sets object address to be read.
func (p *commonPrm) WithAddress(addr oid.Address) {
p.addr = addr
@@ -158,7 +144,7 @@ func (p *commonPrm) WithCachedSignerKey(signerKey *ecdsa.PrivateKey) {
}
// SetHeaderWriter sets target component to write the object header.
-func (p *HeadPrm) SetHeaderWriter(w HeaderWriter) {
+func (p *commonPrm) SetHeaderWriter(w HeaderWriter) {
p.objWriter = &partWriter{
headWriter: w,
}
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index da310d2492..78ca5b5e35 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -2,60 +2,162 @@ package getsvc
import (
"context"
+ "encoding/hex"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
-func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool {
- exec.log.Debug("processing node...")
+func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
+ defer span.End()
- client, ok := exec.remoteClient(info)
+ r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
+
+ rs, ok := r.getRemoteStorage(ctx, info)
if !ok {
return true
}
- obj, err := client.getObject(exec, info)
+ obj, err := r.getRemote(ctx, rs, info)
var errSplitInfo *objectSDK.SplitInfoError
+ var errECInfo *objectSDK.ECInfoError
var errRemoved *apistatus.ObjectAlreadyRemoved
var errOutOfRange *apistatus.ObjectOutOfRange
+ var errAccessDenied *apistatus.ObjectAccessDenied
switch {
default:
- var errNotFound apistatus.ObjectNotFound
-
- exec.status = statusUndefined
- exec.err = errNotFound
-
- exec.log.Debug("remote call failed",
- zap.String("error", err.Error()),
- )
+ r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err))
+ if r.status != statusEC {
+ // for raw requests, continue to collect other parts
+ r.status = statusUndefined
+ if errors.As(err, &errAccessDenied) {
+ r.err = err
+ } else if r.err == nil || !errors.As(r.err, &errAccessDenied) {
+ r.err = new(apistatus.ObjectNotFound)
+ }
+ }
+ return false
case err == nil:
- exec.status = statusOK
- exec.err = nil
+ r.status = statusOK
+ r.err = nil
// both object and err are nil only if the original
// request was forwarded to another node and the object
// has already been streamed to the requesting party
if obj != nil {
- exec.collectedObject = obj
- exec.writeCollectedObject()
+ r.collectedObject = obj
+ r.writeCollectedObject(ctx)
}
+ return true
case errors.As(err, &errRemoved):
- exec.status = statusINHUMED
- exec.err = errRemoved
+ r.status = statusINHUMED
+ r.err = errRemoved
+ return true
case errors.As(err, &errOutOfRange):
- exec.status = statusOutOfRange
- exec.err = errOutOfRange
+ r.status = statusOutOfRange
+ r.err = errOutOfRange
+ return true
case errors.As(err, &errSplitInfo):
- exec.status = statusVIRTUAL
- mergeSplitInfo(exec.splitInfo(), errSplitInfo.SplitInfo())
- exec.err = objectSDK.NewSplitInfoError(exec.infoSplit)
+ r.status = statusVIRTUAL
+ mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo())
+ r.err = objectSDK.NewSplitInfoError(r.infoSplit)
+ return true
+ case errors.As(err, &errECInfo):
+ r.status = statusEC
+ r.err = r.infoEC.addRemote(string(info.PublicKey()), errECInfo.ECInfo())
+ if r.isRaw() {
+ return false // continue to collect all parts
+ }
+ return true
+ }
+}
+
+func (r *request) getRemote(ctx context.Context, rs remoteStorage, info client.NodeInfo) (*objectSDK.Object, error) {
+ if r.isForwardingEnabled() {
+ return rs.ForwardRequest(ctx, info, r.prm.forwarder)
}
- return exec.status != statusUndefined
+ key, err := r.key()
+ if err != nil {
+ return nil, err
+ }
+
+ prm := RemoteRequestParams{
+ Epoch: r.curProcEpoch,
+ TTL: r.prm.common.TTL(),
+ PrivateKey: key,
+ SessionToken: r.prm.common.SessionToken(),
+ BearerToken: r.prm.common.BearerToken(),
+ XHeaders: r.prm.common.XHeaders(),
+ IsRaw: r.isRaw(),
+ }
+
+ if r.headOnly() {
+ return rs.Head(ctx, r.address(), prm)
+ }
+ // we don't specify a payload writer because we accumulate
+ // the object locally (even a huge one).
+ if rng := r.ctxRange(); rng != nil {
+ // The current spec allows another storage node to deny access;
+ // fall back to GET here.
+ return rs.Range(ctx, r.address(), rng, prm)
+ }
+
+ return rs.Get(ctx, r.address(), prm)
+}
+
+func (r *request) getObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo) (*objectSDK.Object, error) {
+ rs, err := r.remoteStorageConstructor.Get(info)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := r.key()
+ if err != nil {
+ return nil, err
+ }
+
+ prm := RemoteRequestParams{
+ Epoch: r.curProcEpoch,
+ TTL: 1,
+ PrivateKey: key,
+ SessionToken: r.prm.common.SessionToken(),
+ BearerToken: r.prm.common.BearerToken(),
+ XHeaders: r.prm.common.XHeaders(),
+ }
+
+ return rs.Get(ctx, addr, prm)
+}
+
+func (r *request) headObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo, raw bool) (*objectSDK.Object, error) {
+ rs, err := r.remoteStorageConstructor.Get(info)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := r.key()
+ if err != nil {
+ return nil, err
+ }
+
+ prm := RemoteRequestParams{
+ Epoch: r.curProcEpoch,
+ TTL: 1,
+ PrivateKey: key,
+ SessionToken: r.prm.common.SessionToken(),
+ BearerToken: r.prm.common.BearerToken(),
+ XHeaders: r.prm.common.XHeaders(),
+ IsRaw: raw,
+ }
+
+ return rs.Head(ctx, addr, prm)
}
diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go
new file mode 100644
index 0000000000..2c64244cf5
--- /dev/null
+++ b/pkg/services/object/get/remote_getter.go
@@ -0,0 +1,55 @@
+package getsvc
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
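+// RemoteGetPrm groups the parameters of RemoteGetter.Get.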
+type RemoteGetPrm struct {
+ Address oid.Address
+ Node netmapSDK.NodeInfo
+}
+
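+// RemoteGetter fetches objects directly from a given remote node
+// using non-forwarding (TTL 1) requests.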
+type RemoteGetter struct {
+ s remoteStorageConstructor
+ es epochSource
+ ks keyStorage
+}
+
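+// Get requests the object from the node specified in prm,
+// signing the request with the locally stored key for the current epoch.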
+func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Object, error) {
+ var nodeInfo client.NodeInfo
+ if err := client.NodeInfoFromRawNetmapElement(&nodeInfo, netmapCore.Node(prm.Node)); err != nil {
+ return nil, err
+ }
+ rs, err := g.s.Get(nodeInfo)
+ if err != nil {
+ return nil, err
+ }
+ epoch, err := g.es.Epoch(ctx)
+ if err != nil {
+ return nil, err
+ }
+ key, err := g.ks.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+ r := RemoteRequestParams{
+ Epoch: epoch,
+ TTL: 1,
+ PrivateKey: key,
+ }
+ return rs.Get(ctx, prm.Address, r)
+}
+
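+// NewRemoteGetter creates a RemoteGetter over the given client constructor,
+// epoch source and key storage.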
+func NewRemoteGetter(cc clientConstructor, es epochSource, ks keyStorage) *RemoteGetter {
+ return &RemoteGetter{
+ s: &multiclientRemoteStorageConstructor{clientConstructor: cc},
+ es: es,
+ ks: ks,
+ }
+}
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
new file mode 100644
index 0000000000..268080486a
--- /dev/null
+++ b/pkg/services/object/get/request.go
@@ -0,0 +1,248 @@
+package getsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
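+// request carries the state of a single Get/Head/GetRange execution:
+// its parameters, current status and the collected result.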
+type request struct {
+ prm RequestParameters
+
+ statusError
+
+ infoSplit *objectSDK.SplitInfo
+
+ infoEC *ecInfo
+
+ log *logger.Logger
+
+ collectedObject *objectSDK.Object
+
+ curProcEpoch uint64
+
+ keyStore keyStorage
+ epochSource epochSource
+ traverserGenerator traverserGenerator
+ remoteStorageConstructor remoteStorageConstructor
+ localStorage localStorage
+ containerSource container.Source
+}
+
+func (r *request) setLogger(l *logger.Logger) {
+ req := "GET"
+ if r.headOnly() {
+ req = "HEAD"
+ } else if r.ctxRange() != nil {
+ req = "GET_RANGE"
+ }
+
+ r.log = l.With(
+ zap.String("request", req),
+ zap.Stringer("address", r.address()),
+ zap.Bool("raw", r.isRaw()),
+ zap.Bool("local", r.isLocal()),
+ zap.Bool("with session", r.prm.common.SessionToken() != nil),
+ zap.Bool("with bearer", r.prm.common.BearerToken() != nil),
+ )
+}
+
+func (r *request) isLocal() bool {
+ return r.prm.common.LocalOnly()
+}
+
+func (r *request) isRaw() bool {
+ return r.prm.raw
+}
+
+func (r *request) address() oid.Address {
+ return r.prm.addr
+}
+
+func (r *request) key() (*ecdsa.PrivateKey, error) {
+ if r.prm.signerKey != nil {
+ // the key has already been requested and
+ // cached by previous operations
+ return r.prm.signerKey, nil
+ }
+
+ var sessionInfo *util.SessionInfo
+
+ if tok := r.prm.common.SessionToken(); tok != nil {
+ sessionInfo = &util.SessionInfo{
+ ID: tok.ID(),
+ Owner: tok.Issuer(),
+ }
+ }
+
+ return r.keyStore.GetKey(sessionInfo)
+}
+
+func (r *request) canAssembleComplexObject() bool {
+ return !r.isRaw()
+}
+
+func (r *request) splitInfo() *objectSDK.SplitInfo {
+ return r.infoSplit
+}
+
+func (r *request) containerID() cid.ID {
+ return r.address().Container()
+}
+
+func (r *request) ctxRange() *objectSDK.Range {
+ return r.prm.rng
+}
+
+func (r *request) headOnly() bool {
+ return r.prm.head
+}
+
+func (r *request) netmapEpoch() uint64 {
+ return r.prm.common.NetmapEpoch()
+}
+
+func (r *request) netmapLookupDepth() uint64 {
+ return r.prm.common.NetmapLookupDepth()
+}
+
+func (r *request) initEpoch(ctx context.Context) bool {
+ r.curProcEpoch = r.netmapEpoch()
+ if r.curProcEpoch > 0 {
+ return true
+ }
+
+ e, err := r.epochSource.Epoch(ctx)
+
+ switch {
+ default:
+ r.status = statusUndefined
+ r.err = err
+
+ r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+
+ return false
+ case err == nil:
+ r.curProcEpoch = e
+ return true
+ }
+}
+
+func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) {
+ obj := addr.Object()
+
+ t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch)
+
+ switch {
+ default:
+ r.status = statusUndefined
+ r.err = err
+
+ r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+
+ return nil, false
+ case err == nil:
+ return t, true
+ }
+}
+
+func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) {
+ rs, err := r.remoteStorageConstructor.Get(info)
+ if err != nil {
+ r.status = statusUndefined
+ r.err = err
+
+ r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient)
+
+ return nil, false
+ }
+
+ return rs, true
+}
+
+func (r *request) writeCollectedHeader(ctx context.Context) bool {
+ if r.ctxRange() != nil {
+ return true
+ }
+
+ err := r.prm.objWriter.WriteHeader(
+ ctx,
+ r.collectedObject.CutPayload(),
+ )
+
+ switch {
+ default:
+ r.status = statusUndefined
+ r.err = err
+
+ r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err))
+ case err == nil:
+ r.status = statusOK
+ r.err = nil
+ }
+
+ return r.status == statusOK
+}
+
+func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object) bool {
+ if r.headOnly() {
+ return true
+ }
+
+ err := r.prm.objWriter.WriteChunk(ctx, obj.Payload())
+
+ switch {
+ default:
+ r.status = statusUndefined
+ r.err = err
+
+ r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err))
+ case err == nil:
+ r.status = statusOK
+ r.err = nil
+ }
+
+ return err == nil
+}
+
+func (r *request) writeCollectedObject(ctx context.Context) {
+ if ok := r.writeCollectedHeader(ctx); ok {
+ r.writeObjectPayload(ctx, r.collectedObject)
+ }
+}
+
+// isForwardingEnabled returns true if the common execution
+// parameters have a request forwarding closure set.
+func (r request) isForwardingEnabled() bool {
+ return r.prm.forwarder != nil
+}
+
+// disableForwarding removes the request forwarding closure from the common
+// parameters so that it is not inherited by new execution contexts.
+func (r *request) disableForwarding() {
+ r.prm.SetRequestForwarder(nil)
+}
+
+func mergeSplitInfo(dst, src *objectSDK.SplitInfo) {
+ if last, ok := src.LastPart(); ok {
+ dst.SetLastPart(last)
+ }
+
+ if link, ok := src.Link(); ok {
+ dst.SetLink(link)
+ }
+
+ if splitID := src.SplitID(); splitID != nil {
+ dst.SetSplitID(splitID)
+ }
+}
diff --git a/pkg/services/object/get/res.go b/pkg/services/object/get/res.go
deleted file mode 100644
index 75a5aaedde..0000000000
--- a/pkg/services/object/get/res.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package getsvc
-
-type RangeHashRes struct {
- hashes [][]byte
-}
-
-func (r *RangeHashRes) Hashes() [][]byte {
- return r.hashes
-}
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index e69ab4f0f6..a103f5a7fa 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -1,122 +1,58 @@
package getsvc
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
+// Option is a Service's constructor option.
+type Option func(*Service)
+
// Service utility serving requests of Object.Get service.
type Service struct {
- *cfg
-}
-
-// Option is a Service's constructor option.
-type Option func(*cfg)
-
-type getClient interface {
- getObject(*execCtx, client.NodeInfo) (*object.Object, error)
-}
-
-type cfg struct {
- log *logger.Logger
-
- localStorage interface {
- get(*execCtx) (*object.Object, error)
- }
-
- clientCache interface {
- get(client.NodeInfo) (getClient, error)
- }
-
- traverserGenerator interface {
- GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, error)
- }
-
- currentEpochReceiver interface {
- currentEpoch() (uint64, error)
- }
-
- keyStore *util.KeyStorage
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- localStorage: new(storageEngineWrapper),
- clientCache: new(clientCacheWrapper),
- }
+ log *logger.Logger
+ localStorage localStorage
+ traverserGenerator traverserGenerator
+ epochSource epochSource
+ keyStore keyStorage
+ remoteStorageConstructor remoteStorageConstructor
+ containerSource container.Source
}
// New creates, initializes and returns utility serving
// Object.Get service requests.
-func New(opts ...Option) *Service {
- c := defaultCfg()
-
- for i := range opts {
- opts[i](c)
+func New(
+ ks keyStorage,
+ es epochSource,
+ e localStorageEngine,
+ tg traverserGenerator,
+ cc clientConstructor,
+ cs container.Source,
+ opts ...Option,
+) *Service {
+ result := &Service{
+ keyStore: ks,
+ epochSource: es,
+ log: logger.NewLoggerWrapper(zap.L()),
+ localStorage: &engineLocalStorage{
+ engine: e,
+ },
+ traverserGenerator: tg,
+ remoteStorageConstructor: &multiclientRemoteStorageConstructor{
+ clientConstructor: cc,
+ },
+ containerSource: cs,
}
-
- return &Service{
- cfg: c,
+ for _, option := range opts {
+ option(result)
}
+ return result
}
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))}
- }
-}
-
-// WithLocalStorageEngine returns option to set local storage
-// instance.
-func WithLocalStorageEngine(e *engine.StorageEngine) Option {
- return func(c *cfg) {
- c.localStorage.(*storageEngineWrapper).engine = e
- }
-}
-
-type ClientConstructor interface {
- Get(client.NodeInfo) (client.MultiAddressClient, error)
-}
-
-// WithClientConstructor returns option to set constructor of remote node clients.
-func WithClientConstructor(v ClientConstructor) Option {
- return func(c *cfg) {
- c.clientCache.(*clientCacheWrapper).cache = v
- }
-}
-
-// WithTraverserGenerator returns option to set generator of
-// placement traverser to get the objects from containers.
-func WithTraverserGenerator(t *util.TraverserGenerator) Option {
- return func(c *cfg) {
- c.traverserGenerator = t
- }
-}
-
-// WithNetMapSource returns option to set network
-// map storage to receive current network state.
-func WithNetMapSource(nmSrc netmap.Source) Option {
- return func(c *cfg) {
- c.currentEpochReceiver = &nmSrcWrapper{
- nmSrc: nmSrc,
- }
- }
-}
-
-// WithKeyStorage returns option to set private
-// key storage for session tokens and node key.
-func WithKeyStorage(store *util.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStore = store
+ return func(s *Service) {
+ s.log = l
}
}
diff --git a/pkg/services/object/get/status.go b/pkg/services/object/get/status.go
new file mode 100644
index 0000000000..919338d7fb
--- /dev/null
+++ b/pkg/services/object/get/status.go
@@ -0,0 +1,15 @@
+package getsvc
+
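+// Execution statuses of a Get request.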
+const (
+ statusUndefined int = iota
+ statusOK
+ statusINHUMED
+ statusVIRTUAL
+ statusOutOfRange
+ statusEC
+)
+
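+// statusError couples the execution status with the error that produced it.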
+type statusError struct {
+ status int
+ err error
+}
diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go
new file mode 100644
index 0000000000..664366d1b4
--- /dev/null
+++ b/pkg/services/object/get/types.go
@@ -0,0 +1,287 @@
+package getsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+
+ coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+)
+
+type epochSource interface {
+ Epoch(ctx context.Context) (uint64, error)
+}
+
+type traverserGenerator interface {
+ GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+}
+
+type keyStorage interface {
+ GetKey(info *util.SessionInfo) (*ecdsa.PrivateKey, error)
+}
+
+type localStorageEngine interface {
+ Head(ctx context.Context, p engine.HeadPrm) (engine.HeadRes, error)
+ GetRange(ctx context.Context, p engine.RngPrm) (engine.RngRes, error)
+ Get(ctx context.Context, p engine.GetPrm) (engine.GetRes, error)
+}
+
+type clientConstructor interface {
+ Get(coreclient.NodeInfo) (coreclient.MultiAddressClient, error)
+}
+
+type remoteStorageConstructor interface {
+ Get(coreclient.NodeInfo) (remoteStorage, error)
+}
+
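+// multiclientRemoteStorageConstructor builds remoteStorage instances
+// backed by multi-address clients.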
+type multiclientRemoteStorageConstructor struct {
+ clientConstructor clientConstructor
+}
+
+func (c *multiclientRemoteStorageConstructor) Get(info coreclient.NodeInfo) (remoteStorage, error) {
+ clt, err := c.clientConstructor.Get(info)
+ if err != nil {
+ return nil, err
+ }
+
+ return &multiaddressRemoteStorage{
+ client: clt,
+ }, nil
+}
+
+type localStorage interface {
+ Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error)
+ Range(ctx context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error)
+ Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error)
+}
+
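+// engineLocalStorage adapts the storage engine to the localStorage interface.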
+type engineLocalStorage struct {
+ engine localStorageEngine
+}
+
+func (s *engineLocalStorage) Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) {
+ var headPrm engine.HeadPrm
+ headPrm.WithAddress(address)
+ headPrm.WithRaw(isRaw)
+
+ r, err := s.engine.Head(ctx, headPrm)
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Header(), nil
+}
+
+func (s *engineLocalStorage) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error) {
+ var getRange engine.RngPrm
+ getRange.WithAddress(address)
+ getRange.WithPayloadRange(rng)
+
+ r, err := s.engine.GetRange(ctx, getRange)
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Object(), nil
+}
+
+func (s *engineLocalStorage) Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) {
+ var getPrm engine.GetPrm
+ getPrm.WithAddress(address)
+
+ r, err := s.engine.Get(ctx, getPrm)
+ if err != nil {
+ return nil, err
+ }
+
+ return r.Object(), nil
+}
+
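+// RemoteRequestParams groups parameters of object operations
+// performed on a remote storage node.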
+type RemoteRequestParams struct {
+ Epoch uint64
+ TTL uint32
+ PrivateKey *ecdsa.PrivateKey
+ SessionToken *session.Object
+ BearerToken *bearer.Token
+ XHeaders []string
+ IsRaw bool
+}
+
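+// remoteStorage abstracts Get, Head and Range operations on a remote node
+// as well as forwarding of the original request.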
+type remoteStorage interface {
+ Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error)
+ Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error)
+ Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error)
+
+ ForwardRequest(ctx context.Context, info coreclient.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error)
+}
+
+type multiaddressRemoteStorage struct {
+ client coreclient.MultiAddressClient
+}
+
+func (s *multiaddressRemoteStorage) ForwardRequest(ctx context.Context, info coreclient.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error) {
+ return forwarder(ctx, info, s.client)
+}
+
+func (s *multiaddressRemoteStorage) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) {
+ var prm internalclient.PayloadRangePrm
+
+ prm.SetClient(s.client)
+ prm.SetTTL(requestParams.TTL)
+ prm.SetNetmapEpoch(requestParams.Epoch)
+ prm.SetAddress(address)
+ prm.SetPrivateKey(requestParams.PrivateKey)
+ prm.SetSessionToken(requestParams.SessionToken)
+ prm.SetBearerToken(requestParams.BearerToken)
+ prm.SetXHeaders(requestParams.XHeaders)
+ prm.SetRange(rng)
+ if requestParams.IsRaw {
+ prm.SetRawFlag()
+ }
+
+ res, err := internalclient.PayloadRange(ctx, prm)
+ if err != nil {
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ if errors.As(err, &errAccessDenied) {
+ obj, err := s.Get(ctx, address, requestParams)
+ if err != nil {
+ return nil, err
+ }
+
+ payload := obj.Payload()
+ from := rng.GetOffset()
+ to := from + rng.GetLength()
+
+ if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
+ return nil, new(apistatus.ObjectOutOfRange)
+ }
+
+ return s.payloadOnlyObject(payload[from:to]), nil
+ }
+ return nil, err
+ }
+
+ return s.payloadOnlyObject(res.PayloadRange()), nil
+}
+
+func (s *multiaddressRemoteStorage) Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) {
+ var prm internalclient.HeadObjectPrm
+
+ prm.SetClient(s.client)
+ prm.SetTTL(requestParams.TTL)
+ prm.SetNetmapEpoch(requestParams.Epoch)
+ prm.SetAddress(address)
+ prm.SetPrivateKey(requestParams.PrivateKey)
+ prm.SetSessionToken(requestParams.SessionToken)
+ prm.SetBearerToken(requestParams.BearerToken)
+ prm.SetXHeaders(requestParams.XHeaders)
+
+ if requestParams.IsRaw {
+ prm.SetRawFlag()
+ }
+
+ res, err := internalclient.HeadObject(ctx, prm)
+ if err != nil {
+ return nil, err
+ }
+
+ return res.Header(), nil
+}
+
+func (s *multiaddressRemoteStorage) Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) {
+ var prm internalclient.GetObjectPrm
+
+ prm.SetClient(s.client)
+ prm.SetTTL(requestParams.TTL)
+ prm.SetNetmapEpoch(requestParams.Epoch)
+ prm.SetAddress(address)
+ prm.SetPrivateKey(requestParams.PrivateKey)
+ prm.SetSessionToken(requestParams.SessionToken)
+ prm.SetBearerToken(requestParams.BearerToken)
+ prm.SetXHeaders(requestParams.XHeaders)
+
+ if requestParams.IsRaw {
+ prm.SetRawFlag()
+ }
+
+ res, err := internalclient.GetObject(ctx, prm)
+ if err != nil {
+ return nil, err
+ }
+
+ return res.Object(), nil
+}
+
+func (s *multiaddressRemoteStorage) payloadOnlyObject(payload []byte) *objectSDK.Object {
+ obj := objectSDK.New()
+ obj.SetPayload(payload)
+
+ return obj
+}
+
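+// RangeHashRes groups the results of a GetRangeHash operation.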
+type RangeHashRes struct {
+ hashes [][]byte
+}
+
+func (r *RangeHashRes) Hashes() [][]byte {
+ return r.hashes
+}
+
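+// ecInfo accumulates EC chunks reported by the local storage
+// and by remote nodes.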
+type ecInfo struct {
+ localChunks []objectSDK.ECChunk
+ remoteChunks map[string][]objectSDK.ECChunk // node pk -> chunk slice
+}
+
+func newECInfo() *ecInfo {
+ return &ecInfo{
+ localChunks: make([]objectSDK.ECChunk, 0),
+ remoteChunks: make(map[string][]objectSDK.ECChunk),
+ }
+}
+
+func (e *ecInfo) addLocal(ecInfo *objectSDK.ECInfo) *objectSDK.ECInfoError {
+ for _, ch := range ecInfo.Chunks {
+ e.localChunks = append(e.localChunks, objectSDK.ECChunk(ch))
+ }
+ return e.createECInfoErr()
+}
+
+func (e *ecInfo) addRemote(nodePK string, ecInfo *objectSDK.ECInfo) *objectSDK.ECInfoError {
+ for _, ch := range ecInfo.Chunks {
+ e.remoteChunks[nodePK] = append(e.remoteChunks[nodePK], objectSDK.ECChunk(ch))
+ }
+ return e.createECInfoErr()
+}
+
+func (e *ecInfo) createECInfoErr() *objectSDK.ECInfoError {
+ unique := make(map[string]struct{})
+ result := objectSDK.NewECInfo()
+ for _, ch := range e.localChunks {
+ if _, found := unique[string(ch.ID.GetValue())]; found {
+ continue
+ }
+ result.AddChunk(ch)
+ unique[string(ch.ID.GetValue())] = struct{}{}
+ }
+ for _, chunks := range e.remoteChunks {
+ for _, ch := range chunks {
+ if _, found := unique[string(ch.ID.GetValue())]; found {
+ continue
+ }
+ result.AddChunk(ch)
+ unique[string(ch.ID.GetValue())] = struct{}{}
+ }
+ }
+ return objectSDK.NewECInfoError(result)
+}
diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go
deleted file mode 100644
index dc98e6c58e..0000000000
--- a/pkg/services/object/get/util.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package getsvc
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
- "io"
-
- coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- internal "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-)
-
-type SimpleObjectWriter struct {
- obj *object.Object
-
- pld []byte
-}
-
-type clientCacheWrapper struct {
- cache ClientConstructor
-}
-
-type clientWrapper struct {
- client coreclient.MultiAddressClient
-}
-
-type storageEngineWrapper struct {
- engine *engine.StorageEngine
-}
-
-type partWriter struct {
- ObjectWriter
-
- headWriter HeaderWriter
-
- chunkWriter ChunkWriter
-}
-
-type hasherWrapper struct {
- hash io.Writer
-}
-
-type nmSrcWrapper struct {
- nmSrc netmap.Source
-}
-
-func NewSimpleObjectWriter() *SimpleObjectWriter {
- return &SimpleObjectWriter{
- obj: object.New(),
- }
-}
-
-func (s *SimpleObjectWriter) WriteHeader(_ context.Context, obj *object.Object) error {
- s.obj = obj
-
- s.pld = make([]byte, 0, obj.PayloadSize())
-
- return nil
-}
-
-func (s *SimpleObjectWriter) WriteChunk(_ context.Context, p []byte) error {
- s.pld = append(s.pld, p...)
- return nil
-}
-
-func (s *SimpleObjectWriter) Object() *object.Object {
- if len(s.pld) > 0 {
- s.obj.SetPayload(s.pld)
- }
-
- return s.obj
-}
-
-func (c *clientCacheWrapper) get(info coreclient.NodeInfo) (getClient, error) {
- clt, err := c.cache.Get(info)
- if err != nil {
- return nil, err
- }
-
- return &clientWrapper{
- client: clt,
- }, nil
-}
-
-func (c *clientWrapper) getObject(exec *execCtx, info coreclient.NodeInfo) (*object.Object, error) {
- if exec.isForwardingEnabled() {
- return exec.prm.forwarder(info, c.client)
- }
-
- key, err := exec.key()
- if err != nil {
- return nil, err
- }
-
- if exec.headOnly() {
- var prm internalclient.HeadObjectPrm
-
- prm.SetContext(exec.context())
- prm.SetClient(c.client)
- prm.SetTTL(exec.prm.common.TTL())
- prm.SetNetmapEpoch(exec.curProcEpoch)
- prm.SetAddress(exec.address())
- prm.SetPrivateKey(key)
- prm.SetSessionToken(exec.prm.common.SessionToken())
- prm.SetBearerToken(exec.prm.common.BearerToken())
- prm.SetXHeaders(exec.prm.common.XHeaders())
-
- if exec.isRaw() {
- prm.SetRawFlag()
- }
-
- res, err := internalclient.HeadObject(prm)
- if err != nil {
- return nil, err
- }
-
- return res.Header(), nil
- }
- // we don't specify payload writer because we accumulate
- // the object locally (even huge).
- if rng := exec.ctxRange(); rng != nil {
- var prm internalclient.PayloadRangePrm
-
- prm.SetContext(exec.context())
- prm.SetClient(c.client)
- prm.SetTTL(exec.prm.common.TTL())
- prm.SetNetmapEpoch(exec.curProcEpoch)
- prm.SetAddress(exec.address())
- prm.SetPrivateKey(key)
- prm.SetSessionToken(exec.prm.common.SessionToken())
- prm.SetBearerToken(exec.prm.common.BearerToken())
- prm.SetXHeaders(exec.prm.common.XHeaders())
- prm.SetRange(rng)
-
- if exec.isRaw() {
- prm.SetRawFlag()
- }
-
- res, err := internalclient.PayloadRange(prm)
- if err != nil {
- var errAccessDenied *apistatus.ObjectAccessDenied
- if errors.As(err, &errAccessDenied) {
- // Current spec allows other storage node to deny access,
- // fallback to GET here.
- obj, err := c.get(exec, key)
- if err != nil {
- return nil, err
- }
-
- payload := obj.Payload()
- from := rng.GetOffset()
- to := from + rng.GetLength()
-
- if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
- return nil, new(apistatus.ObjectOutOfRange)
- }
-
- return payloadOnlyObject(payload[from:to]), nil
- }
- return nil, err
- }
-
- return payloadOnlyObject(res.PayloadRange()), nil
- }
-
- return c.get(exec, key)
-}
-
-func (c *clientWrapper) get(exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) {
- var prm internalclient.GetObjectPrm
-
- prm.SetContext(exec.context())
- prm.SetClient(c.client)
- prm.SetTTL(exec.prm.common.TTL())
- prm.SetNetmapEpoch(exec.curProcEpoch)
- prm.SetAddress(exec.address())
- prm.SetPrivateKey(key)
- prm.SetSessionToken(exec.prm.common.SessionToken())
- prm.SetBearerToken(exec.prm.common.BearerToken())
- prm.SetXHeaders(exec.prm.common.XHeaders())
-
- if exec.isRaw() {
- prm.SetRawFlag()
- }
-
- res, err := internal.GetObject(prm)
- if err != nil {
- return nil, err
- }
-
- return res.Object(), nil
-}
-
-func (e *storageEngineWrapper) get(exec *execCtx) (*object.Object, error) {
- if exec.headOnly() {
- var headPrm engine.HeadPrm
- headPrm.WithAddress(exec.address())
- headPrm.WithRaw(exec.isRaw())
-
- r, err := e.engine.Head(headPrm)
- if err != nil {
- return nil, err
- }
-
- return r.Header(), nil
- } else if rng := exec.ctxRange(); rng != nil {
- var getRange engine.RngPrm
- getRange.WithAddress(exec.address())
- getRange.WithPayloadRange(rng)
-
- r, err := e.engine.GetRange(getRange)
- if err != nil {
- return nil, err
- }
-
- return r.Object(), nil
- } else {
- var getPrm engine.GetPrm
- getPrm.WithAddress(exec.address())
-
- r, err := e.engine.Get(getPrm)
- if err != nil {
- return nil, err
- }
-
- return r.Object(), nil
- }
-}
-
-func (w *partWriter) WriteChunk(ctx context.Context, p []byte) error {
- return w.chunkWriter.WriteChunk(ctx, p)
-}
-
-func (w *partWriter) WriteHeader(ctx context.Context, o *object.Object) error {
- return w.headWriter.WriteHeader(ctx, o)
-}
-
-func payloadOnlyObject(payload []byte) *object.Object {
- obj := object.New()
- obj.SetPayload(payload)
-
- return obj
-}
-
-func (h *hasherWrapper) WriteChunk(_ context.Context, p []byte) error {
- _, err := h.hash.Write(p)
- return err
-}
-
-func (n *nmSrcWrapper) currentEpoch() (uint64, error) {
- return n.nmSrc.Epoch()
-}
diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go
new file mode 100644
index 0000000000..aaa09b891f
--- /dev/null
+++ b/pkg/services/object/get/v2/errors.go
@@ -0,0 +1,88 @@
+package getsvc
+
+import (
+ "errors"
+ "fmt"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ refs "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+)
+
+var (
+ errMissingObjAddress = errors.New("missing object address")
+ errWrongMessageSeq = errors.New("incorrect message sequence")
+ errNilObjectPart = errors.New("nil object part")
+ errMissingSignature = errors.New("missing signature")
+ errInvalidObjectIDSign = errors.New("invalid object ID signature")
+
+ errWrongHeaderPartTypeExpShortRecvWithSignature = fmt.Errorf("wrong header part type: expected %T, received %T",
+ (*objectV2.ShortHeader)(nil), (*objectV2.HeaderWithSignature)(nil),
+ )
+ errWrongHeaderPartTypeExpWithSignRecvShort = fmt.Errorf("wrong header part type: expected %T, received %T",
+ (*objectV2.HeaderWithSignature)(nil), (*objectV2.ShortHeader)(nil),
+ )
+)
+
+func errInvalidObjAddress(err error) error {
+ return fmt.Errorf("invalid object address: %w", err)
+}
+
+func errRequestParamsValidation(err error) error {
+ return fmt.Errorf("request params validation: %w", err)
+}
+
+func errFetchingSessionKey(err error) error {
+ return fmt.Errorf("fetching session key: %w", err)
+}
+
+func errUnknownChechsumType(t refs.ChecksumType) error {
+ return fmt.Errorf("unknown checksum type %v", t)
+}
+
+func errResponseVerificationFailed(err error) error {
+ return fmt.Errorf("response verification failed: %w", err)
+}
+
+func errCouldNotWriteObjHeader(err error) error {
+ return fmt.Errorf("could not write object header in Get forwarder: %w", err)
+}
+
+func errStreamOpeningFailed(err error) error {
+ return fmt.Errorf("stream opening failed: %w", err)
+}
+
+func errReadingResponseFailed(err error) error {
+ return fmt.Errorf("reading the response failed: %w", err)
+}
+
+func errUnexpectedObjectPart(v objectV2.GetObjectPart) error {
+ return fmt.Errorf("unexpected object part %T", v)
+}
+
+func errCouldNotWriteObjChunk(forwarder string, err error) error {
+ return fmt.Errorf("could not write object chunk in %s forwarder: %w", forwarder, err)
+}
+
+func errCouldNotCreateGetRangeStream(err error) error {
+ return fmt.Errorf("could not create Get payload range stream: %w", err)
+}
+
+func errUnexpectedRangePart(v objectV2.GetRangePart) error {
+ return fmt.Errorf("unexpected range type %T", v)
+}
+
+func errUnexpectedHeaderPart(v objectV2.GetHeaderPart) error {
+ return fmt.Errorf("unexpected header type %T", v)
+}
+
+func errMarshalID(err error) error {
+ return fmt.Errorf("marshal ID: %w", err)
+}
+
+func errCantReadSignature(err error) error {
+ return fmt.Errorf("can't read signature: %w", err)
+}
+
+func errSendingRequestFailed(err error) error {
+ return fmt.Errorf("sending the request failed: %w", err)
+}
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
new file mode 100644
index 0000000000..60fcd7fbfc
--- /dev/null
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -0,0 +1,179 @@
+package getsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "io"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
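+// getRequestForwarder re-signs and forwards a Get request to another node
+// and relays the response stream back to the local client.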
+type getRequestForwarder struct {
+ OnceResign sync.Once
+ GlobalProgress int
+ Key *ecdsa.PrivateKey
+ Request *objectV2.GetRequest
+ Stream *streamObjectWriter
+
+ headerSent bool
+ headerSentGuard sync.Mutex
+}
+
+func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getRequestForwarder.forwardRequestToNode",
+ trace.WithAttributes(attribute.String("address", addr.String())),
+ )
+ defer span.End()
+
+ var err error
+
+ // compose and re-sign the forwarding request exactly once
+ f.OnceResign.Do(func() {
+ // compose the meta header of the local server
+ metaHdr := new(session.RequestMetaHeader)
+ metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+ // TODO: #1165 think about how to set the other fields
+ metaHdr.SetOrigin(f.Request.GetMetaHeader())
+ writeCurrentVersion(metaHdr)
+ f.Request.SetMetaHeader(metaHdr)
+ err = signature.SignServiceMessage(f.Key, f.Request)
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ getStream, err := f.openStream(ctx, addr, c)
+ if err != nil {
+ return nil, err
+ }
+ return nil, f.readStream(ctx, c, getStream, pubkey)
+}
+
+func (f *getRequestForwarder) verifyResponse(resp *objectV2.GetResponse, pubkey []byte) error {
+ // verify response key
+ if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
+ return err
+ }
+
+ // verify response structure
+ if err := signature.VerifyServiceMessage(resp); err != nil {
+ return errResponseVerificationFailed(err)
+ }
+
+ return checkStatus(resp.GetMetaHeader().GetStatus())
+}
+
+func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetObjectPartInit) error {
+ obj := new(objectV2.Object)
+
+ obj.SetObjectID(v.GetObjectID())
+ obj.SetSignature(v.GetSignature())
+ obj.SetHeader(v.GetHeader())
+
+ f.headerSentGuard.Lock()
+ defer f.headerSentGuard.Unlock()
+ if f.headerSent {
+ return nil
+ }
+ if err := f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj)); err != nil {
+ return errCouldNotWriteObjHeader(err)
+ }
+ f.headerSent = true
+ return nil
+}
+
+func (f *getRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.GetResponseReader, error) {
+ var getStream *rpc.GetResponseReader
+ err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error {
+ var e error
+ getStream, e = rpc.GetObject(cli, f.Request, rpcclient.WithContext(ctx))
+ return e
+ })
+ if err != nil {
+ return nil, errStreamOpeningFailed(err)
+ }
+ return getStream, nil
+}
+
+func (f *getRequestForwarder) readStream(ctx context.Context, c client.MultiAddressClient, getStream *rpc.GetResponseReader, pubkey []byte) error {
+ var (
+ headWas bool
+ resp = new(objectV2.GetResponse)
+ localProgress int
+ )
+
+ for {
+ // receive message from server stream
+ err := getStream.Read(resp)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ if !headWas {
+ return io.ErrUnexpectedEOF
+ }
+
+ break
+ }
+
+ internalclient.ReportError(c, err)
+ return errReadingResponseFailed(err)
+ }
+
+ if err := f.verifyResponse(resp, pubkey); err != nil {
+ return err
+ }
+
+ switch v := resp.GetBody().GetObjectPart().(type) {
+ default:
+ return errUnexpectedObjectPart(v)
+ case *objectV2.GetObjectPartInit:
+ if headWas {
+ return errWrongMessageSeq
+ }
+ headWas = true
+ if err := f.writeHeader(ctx, v); err != nil {
+ return err
+ }
+ case *objectV2.GetObjectPartChunk:
+ if !headWas {
+ return errWrongMessageSeq
+ }
+
+ origChunk := v.GetChunk()
+
+ chunk := chunkToSend(f.GlobalProgress, localProgress, origChunk)
+ if len(chunk) == 0 {
+ localProgress += len(origChunk)
+ continue
+ }
+
+ if err = f.Stream.WriteChunk(ctx, chunk); err != nil {
+ return errCouldNotWriteObjChunk("Get", err)
+ }
+
+ localProgress += len(origChunk)
+ f.GlobalProgress += len(chunk)
+ case *objectV2.SplitInfo:
+ si := objectSDK.NewSplitInfoFromV2(v)
+ return objectSDK.NewSplitInfoError(si)
+ case *objectV2.ECInfo:
+ ei := objectSDK.NewECInfoFromV2(v)
+ return objectSDK.NewECInfoError(ei)
+ }
+ }
+ return nil
+}
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
new file mode 100644
index 0000000000..a44616fc9d
--- /dev/null
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -0,0 +1,126 @@
+package getsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "io"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
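+// getRangeRequestForwarder re-signs and forwards a GetRange request to
+// another node and relays the payload chunks back to the local client.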
+type getRangeRequestForwarder struct {
+ OnceResign sync.Once
+ GlobalProgress int
+ Key *ecdsa.PrivateKey
+ Request *objectV2.GetRangeRequest
+ Stream *streamObjectRangeWriter
+}
+
+func (f *getRangeRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getRangeRequestForwarder.forwardRequestToNode",
+ trace.WithAttributes(attribute.String("address", addr.String())),
+ )
+ defer span.End()
+
+ var err error
+
+ // compose and re-sign the forwarding request exactly once
+ f.OnceResign.Do(func() {
+ // compose the meta header of the local server
+ metaHdr := new(session.RequestMetaHeader)
+ metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+ // TODO: #1165 think about how to set the other fields
+ metaHdr.SetOrigin(f.Request.GetMetaHeader())
+ writeCurrentVersion(metaHdr)
+
+ f.Request.SetMetaHeader(metaHdr)
+
+ err = signature.SignServiceMessage(f.Key, f.Request)
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ rangeStream, err := f.openStream(ctx, addr, c)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, f.readStream(ctx, rangeStream, c, pubkey)
+}
+
+func (f *getRangeRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.ObjectRangeResponseReader, error) {
+ // open stream
+ var rangeStream *rpc.ObjectRangeResponseReader
+ err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error {
+ var e error
+ rangeStream, e = rpc.GetObjectRange(cli, f.Request, rpcclient.WithContext(ctx))
+ return e
+ })
+ if err != nil {
+ return nil, errCouldNotCreateGetRangeStream(err)
+ }
+ return rangeStream, nil
+}
+
+func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream *rpc.ObjectRangeResponseReader, c client.MultiAddressClient, pubkey []byte) error {
+ resp := new(objectV2.GetRangeResponse)
+ var localProgress int
+
+ for {
+ // receive message from server stream
+ err := rangeStream.Read(resp)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ internalclient.ReportError(c, err)
+ return errReadingResponseFailed(err)
+ }
+
+ if err := verifyResponse(resp, pubkey); err != nil {
+ return err
+ }
+
+ switch v := resp.GetBody().GetRangePart().(type) {
+ case nil:
+ return errUnexpectedRangePart(v)
+ case *objectV2.GetRangePartChunk:
+ origChunk := v.GetChunk()
+
+ chunk := chunkToSend(f.GlobalProgress, localProgress, origChunk)
+ if len(chunk) == 0 {
+ localProgress += len(origChunk)
+ continue
+ }
+
+ if err = f.Stream.WriteChunk(ctx, chunk); err != nil {
+ return errCouldNotWriteObjChunk("GetRange", err)
+ }
+
+ localProgress += len(origChunk)
+ f.GlobalProgress += len(chunk)
+ case *objectV2.SplitInfo:
+ si := objectSDK.NewSplitInfoFromV2(v)
+ return objectSDK.NewSplitInfoError(si)
+ case *objectV2.ECInfo:
+ ei := objectSDK.NewECInfoFromV2(v)
+ return objectSDK.NewECInfoError(ei)
+ }
+ }
+ return nil
+}
diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go
new file mode 100644
index 0000000000..308ccd5129
--- /dev/null
+++ b/pkg/services/object/get/v2/get_range_hash.go
@@ -0,0 +1,218 @@
+package getsvc
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+// GetRangeHash calls the internal service and returns a v2 response.
+func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
+ forward, err := s.needToForwardGetRangeHashRequest(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ if forward.needToForward {
+ return s.forwardGetRangeHashRequest(ctx, req, forward)
+ }
+ p, err := s.toHashRangePrm(req)
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := s.svc.GetRangeHash(ctx, *p)
+ if err != nil {
+ return nil, err
+ }
+
+ return toHashResponse(req.GetBody().GetType(), res), nil
+}
+
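+// getRangeForwardParams holds the forwarding decision for a GetRangeHash
+// request together with the object address and its container nodes.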
+type getRangeForwardParams struct {
+ needToForward bool
+ containerNodes []netmapSDK.NodeInfo
+ address oid.Address
+}
+
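+// needToForwardGetRangeHashRequest reports whether the request has to be
+// forwarded: forwarding is needed when the TTL permits it and the local
+// node is not among the container nodes of the object.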
+func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
+ if req.GetMetaHeader().GetTTL() <= 1 {
+ return getRangeForwardParams{}, nil
+ }
+
+ var result getRangeForwardParams
+ addrV2 := req.GetBody().GetAddress()
+ if addrV2 == nil {
+ return result, errMissingObjAddress
+ }
+
+ var addr oid.Address
+ err := addr.ReadFromV2(*addrV2)
+ if err != nil {
+ return result, errInvalidObjAddress(err)
+ }
+ result.address = addr
+
+ cont, err := s.contSource.Get(ctx, addr.Container())
+ if err != nil {
+ return result, fmt.Errorf("(%T) could not get container: %w", s, err)
+ }
+
+ epoch, err := s.netmapSource.Epoch(ctx)
+ if err != nil {
+ return result, fmt.Errorf("(%T) could not get epoch: %w", s, err)
+ }
+
+ nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch)
+ if err != nil {
+ return result, fmt.Errorf("(%T) could not get netmap: %w", s, err)
+ }
+
+ builder := placement.NewNetworkMapBuilder(nm)
+
+ objectID := addr.Object()
+ nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy())
+ if err != nil {
+ return result, fmt.Errorf("(%T) could not build object placement: %w", s, err)
+ }
+ result.containerNodes = distinctBy(placement.FlattenNodes(nodesVector), func(n netmapSDK.NodeInfo) string { return hex.EncodeToString(n.PublicKey()) })
+
+ for _, node := range result.containerNodes {
+ if s.announcedKeys.IsLocalKey(node.PublicKey()) {
+ return result, nil
+ }
+ }
+ result.needToForward = true
+ return result, nil
+}
+
+func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest, params getRangeForwardParams) (*objectV2.GetRangeHashResponse, error) {
+ key, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ metaHdr := new(session.RequestMetaHeader)
+ metaHdr.SetTTL(req.GetMetaHeader().GetTTL() - 1)
+ metaHdr.SetOrigin(req.GetMetaHeader())
+ writeCurrentVersion(metaHdr)
+ req.SetMetaHeader(metaHdr)
+
+ if err := signature.SignServiceMessage(key, req); err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+ for _, node := range params.containerNodes {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ var addrGr network.AddressGroup
+ if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil {
+ s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ continue
+ }
+
+ var extAddr network.AddressGroup
+ if len(node.ExternalAddresses()) > 0 {
+ if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil {
+ s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+ continue
+ }
+ }
+
+ var info clientCore.NodeInfo
+ clientCore.NodeInfoFromNetmapElement(&info, placement.NewNode(addrGr, extAddr, node.PublicKey()))
+
+ resp, err := s.performGetRangeHashOnNode(ctx, req, info)
+ if err == nil {
+ if err := verifyResponse(resp, info.PublicKey()); err != nil {
+ return nil, err
+ }
+ return resp, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode,
+ zap.String("node_public_key", hex.EncodeToString(node.PublicKey())),
+ zap.Stringer("address", params.address),
+ zap.Error(err))
+ }
+ s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
+ if firstErr != nil {
+ return nil, firstErr
+ }
+ return nil, new(apistatus.ObjectNotFound)
+}
+
+func (s *Service) performGetRangeHashOnNode(ctx context.Context, req *objectV2.GetRangeHashRequest, info clientCore.NodeInfo) (*objectV2.GetRangeHashResponse, error) {
+ cl, err := s.clientSource.Get(info)
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+ var resp *objectV2.GetRangeHashResponse
+ info.AddressGroup().IterateAddresses(func(a network.Address) bool {
+ resp, err = s.performGetRangeHashOnAddress(ctx, req, cl, a)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ return false
+ }
+ return true
+ })
+ // prefer a successful response over errors seen on earlier addresses
+ if resp != nil {
+ return resp, nil
+ }
+ if firstErr != nil {
+ return nil, firstErr
+ }
+ return nil, new(apistatus.ObjectNotFound)
+}
+
+func (s *Service) performGetRangeHashOnAddress(ctx context.Context, req *objectV2.GetRangeHashRequest, cl clientCore.MultiAddressClient,
+ a network.Address,
+) (*objectV2.GetRangeHashResponse, error) {
+ var resp *objectV2.GetRangeHashResponse
+ var rpcErr error
+ err := cl.RawForAddress(ctx, a, func(cli *rpcclient.Client) error {
+ resp, rpcErr = rpc.HashObjectRange(cli, req, rpcclient.WithContext(ctx))
+ return rpcErr
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+}
+
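+// distinctBy returns the elements of source whose keys, as produced by
+// keySelector, are seen for the first time, preserving the original order;
+// e.g., distinctBy([]string{"a", "b", "a"}, func(s string) string { return s })
+// yields ["a", "b"].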
+func distinctBy[T any, K comparable](source []T, keySelector func(v T) K) []T {
+ var result []T
+ dict := make(map[K]struct{})
+ for _, v := range source {
+ key := keySelector(v)
+ if _, exists := dict[key]; !exists {
+ result = append(result, v)
+ dict[key] = struct{}{}
+ }
+ }
+ return result
+}
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
new file mode 100644
index 0000000000..56056398db
--- /dev/null
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -0,0 +1,160 @@
+package getsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
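+// headRequestForwarder re-signs and forwards a Head request to another node
+// and converts the response into an object carrying the received header.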
+type headRequestForwarder struct {
+ Request *objectV2.HeadRequest
+ OnceResign sync.Once
+ ObjectAddr oid.Address
+ Key *ecdsa.PrivateKey
+}
+
+func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "headRequestForwarder.forwardRequestToNode",
+ trace.WithAttributes(attribute.String("address", addr.String())),
+ )
+ defer span.End()
+
+ var err error
+
+ // compose and re-sign the forwarding request exactly once
+ f.OnceResign.Do(func() {
+ // compose the meta header of the local server
+ metaHdr := new(session.RequestMetaHeader)
+ metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+ // TODO: #1165 think about how to set the other fields
+ metaHdr.SetOrigin(f.Request.GetMetaHeader())
+ writeCurrentVersion(metaHdr)
+
+ f.Request.SetMetaHeader(metaHdr)
+
+ err = signature.SignServiceMessage(f.Key, f.Request)
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ headResp, err := f.sendHeadRequest(ctx, addr, c)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := verifyResponse(headResp, pubkey); err != nil {
+ return nil, err
+ }
+
+ var (
+ hdr *objectV2.Header
+ idSig *refs.Signature
+ )
+
+ switch v := headResp.GetBody().GetHeaderPart().(type) {
+ case nil:
+ return nil, errUnexpectedHeaderPart(v)
+ case *objectV2.ShortHeader:
+ if hdr, err = f.getHeaderFromShortHeader(v); err != nil {
+ return nil, err
+ }
+ case *objectV2.HeaderWithSignature:
+ if hdr, idSig, err = f.getHeaderAndSignature(v); err != nil {
+ return nil, err
+ }
+ case *objectV2.SplitInfo:
+ si := objectSDK.NewSplitInfoFromV2(v)
+ return nil, objectSDK.NewSplitInfoError(si)
+ case *objectV2.ECInfo:
+ ei := objectSDK.NewECInfoFromV2(v)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ objv2 := new(objectV2.Object)
+ objv2.SetHeader(hdr)
+ objv2.SetSignature(idSig)
+
+ obj := objectSDK.NewFromV2(objv2)
+ obj.SetID(f.ObjectAddr.Object())
+
+ return obj, nil
+}
+
+func (f *headRequestForwarder) getHeaderFromShortHeader(sh *objectV2.ShortHeader) (*objectV2.Header, error) {
+ if !f.Request.GetBody().GetMainOnly() {
+ return nil, errWrongHeaderPartTypeExpWithSignRecvShort
+ }
+
+ hdr := new(objectV2.Header)
+ hdr.SetPayloadLength(sh.GetPayloadLength())
+ hdr.SetVersion(sh.GetVersion())
+ hdr.SetOwnerID(sh.GetOwnerID())
+ hdr.SetObjectType(sh.GetObjectType())
+ hdr.SetCreationEpoch(sh.GetCreationEpoch())
+ hdr.SetPayloadHash(sh.GetPayloadHash())
+ hdr.SetHomomorphicHash(sh.GetHomomorphicHash())
+ return hdr, nil
+}
+
+func (f *headRequestForwarder) getHeaderAndSignature(hdrWithSig *objectV2.HeaderWithSignature) (*objectV2.Header, *refs.Signature, error) {
+ if f.Request.GetBody().GetMainOnly() {
+ return nil, nil, errWrongHeaderPartTypeExpShortRecvWithSignature
+ }
+
+ if hdrWithSig == nil {
+ return nil, nil, errNilObjectPart
+ }
+
+ hdr := hdrWithSig.GetHeader()
+ idSig := hdrWithSig.GetSignature()
+
+ if idSig == nil {
+ return nil, nil, errMissingSignature
+ }
+
+ binID, err := f.ObjectAddr.Object().Marshal()
+ if err != nil {
+ return nil, nil, errMarshalID(err)
+ }
+
+ var sig frostfscrypto.Signature
+ if err := sig.ReadFromV2(*idSig); err != nil {
+ return nil, nil, errCantReadSignature(err)
+ }
+
+ if !sig.Verify(binID) {
+ return nil, nil, errInvalidObjectIDSign
+ }
+
+ return hdr, idSig, nil
+}
+
+func (f *headRequestForwarder) sendHeadRequest(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*objectV2.HeadResponse, error) {
+ var headResp *objectV2.HeadResponse
+ err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error {
+ var e error
+ headResp, e = rpc.HeadObject(cli, f.Request, rpcclient.WithContext(ctx))
+ return e
+ })
+ if err != nil {
+ return nil, errSendingRequestFailed(err)
+ }
+ return headResp, nil
+}
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index 3fd8cd04a0..0ec8912fd5 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -4,11 +4,16 @@ import (
"context"
"errors"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.uber.org/zap"
)
// Service implements Get operation of Object service v2.
@@ -19,15 +24,44 @@ type Service struct {
// Option represents Service constructor option.
type Option func(*cfg)
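+// clientSource produces multi-address clients for storage nodes.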
+type clientSource interface {
+ Get(info clientcore.NodeInfo) (clientcore.MultiAddressClient, error)
+}
+
type cfg struct {
svc *getsvc.Service
keyStorage *objutil.KeyStorage
+
+ clientSource clientSource
+
+ netmapSource netmap.Source
+
+ announcedKeys netmap.AnnouncedKeys
+
+ contSource container.Source
+
+ log *logger.Logger
}
// NewService constructs Service instance from provided options.
-func NewService(opts ...Option) *Service {
- c := new(cfg)
+func NewService(svc *getsvc.Service,
+ keyStorage *objutil.KeyStorage,
+ clientSource clientSource,
+ netmapSource netmap.Source,
+ announcedKeys netmap.AnnouncedKeys,
+ contSource container.Source,
+ opts ...Option,
+) *Service {
+ c := &cfg{
+ svc: svc,
+ keyStorage: keyStorage,
+ clientSource: clientSource,
+ netmapSource: netmapSource,
+ announcedKeys: announcedKeys,
+ contSource: contSource,
+ log: logger.NewLoggerWrapper(zap.L()),
+ }
for i := range opts {
opts[i](c)
@@ -47,11 +81,14 @@ func (s *Service) Get(req *objectV2.GetRequest, stream objectSvc.GetObjectStream
err = s.svc.Get(stream.Context(), *p)
- var splitErr *object.SplitInfoError
+ var splitErr *objectSDK.SplitInfoError
+ var ecErr *objectSDK.ECInfoError
switch {
case errors.As(err, &splitErr):
return stream.Send(splitInfoResponse(splitErr.SplitInfo()))
+ case errors.As(err, &ecErr):
+ return stream.Send(ecInfoResponse(ecErr.ECInfo()))
default:
return err
}
@@ -66,62 +103,48 @@ func (s *Service) GetRange(req *objectV2.GetRangeRequest, stream objectSvc.GetOb
err = s.svc.GetRange(stream.Context(), *p)
- var splitErr *object.SplitInfoError
+ var splitErr *objectSDK.SplitInfoError
+ var ecErr *objectSDK.ECInfoError
switch {
case errors.As(err, &splitErr):
return stream.Send(splitInfoRangeResponse(splitErr.SplitInfo()))
+ case errors.As(err, &ecErr):
+ return stream.Send(ecInfoRangeResponse(ecErr.ECInfo()))
default:
return err
}
}
-// GetRangeHash calls internal service and returns v2 response.
-func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- p, err := s.toHashRangePrm(req)
- if err != nil {
- return nil, err
- }
-
- res, err := s.svc.GetRangeHash(ctx, *p)
- if err != nil {
- return nil, err
- }
-
- return toHashResponse(req.GetBody().GetType(), res), nil
-}
-
// Head serves FrostFS API v2 compatible HEAD requests.
func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
resp := new(objectV2.HeadResponse)
resp.SetBody(new(objectV2.HeadResponseBody))
- p, err := s.toHeadPrm(ctx, req, resp)
+ p, err := s.toHeadPrm(req, resp)
if err != nil {
return nil, err
}
err = s.svc.Head(ctx, *p)
- var splitErr *object.SplitInfoError
+ var splitErr *objectSDK.SplitInfoError
+ var ecErr *objectSDK.ECInfoError
if errors.As(err, &splitErr) {
setSplitInfoHeadResponse(splitErr.SplitInfo(), resp)
err = nil
}
+ if errors.As(err, &ecErr) {
+ setECInfoHeadResponse(ecErr.ECInfo(), resp)
+ err = nil
+ }
return resp, err
}
-func WithInternalService(v *getsvc.Service) Option {
+func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.svc = v
- }
-}
-
-// WithKeyStorage returns option to set local private key storage.
-func WithKeyStorage(ks *objutil.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStorage = ks
+ c.log = l
}
}
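The refactored constructor moves the hard dependencies of the Get v2 service into positional parameters and keeps functional options only for settings with sane defaults, such as the logger. A self-contained sketch of this required-arguments-plus-options pattern, with hypothetical placeholder types:

```go
package main

import "fmt"

type Logger struct{ prefix string }

type cfg struct {
	svc string  // required dependency
	log *Logger // optional, has a default
}

type Option func(*cfg)

// WithLogger overrides the default logger, mirroring the WithLogger option
// kept by the refactored service.
func WithLogger(l *Logger) Option {
	return func(c *cfg) { c.log = l }
}

// NewService takes required dependencies positionally and applies options
// last, so a caller cannot forget a mandatory field.
func NewService(svc string, opts ...Option) *cfg {
	c := &cfg{
		svc: svc,
		log: &Logger{prefix: "default"}, // default, like logger.NewLoggerWrapper(zap.L())
	}
	for i := range opts {
		opts[i](c)
	}
	return c
}

func main() {
	s := NewService("get-service", WithLogger(&Logger{prefix: "custom"}))
	fmt.Println(s.svc, s.log.prefix)
}
```

Compared with options-only construction, a forgotten dependency now fails at compile time instead of surfacing as a nil field at runtime.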
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
index 4347ef416d..0d73bcd4d4 100644
--- a/pkg/services/object/get/v2/streamer.go
+++ b/pkg/services/object/get/v2/streamer.go
@@ -3,9 +3,9 @@ package getsvc
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
type streamObjectWriter struct {
@@ -16,7 +16,7 @@ type streamObjectRangeWriter struct {
objectSvc.GetObjectRangeStream
}
-func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *object.Object) error {
+func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Object) error {
p := new(objectV2.GetObjectPartInit)
objV2 := obj.ToV2()
@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *object.Object)
p.SetHeader(objV2.GetHeader())
p.SetSignature(objV2.GetSignature())
- return s.GetObjectStream.Send(newResponse(p))
+ return s.Send(newResponse(p))
}
func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
p := new(objectV2.GetObjectPartChunk)
p.SetChunk(chunk)
- return s.GetObjectStream.Send(newResponse(p))
+ return s.Send(newResponse(p))
}
func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
}
func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
- return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
+ return s.Send(newRangeResponse(chunk))
}
func newRangeResponse(p []byte) *objectV2.GetRangeResponse {
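Dropping the explicit GetObjectStream selector works because the stream is an embedded field, so its Send method is promoted onto the wrapper type. A tiny self-contained illustration of Go method promotion:

```go
package main

import "fmt"

type GetObjectStream struct{}

func (GetObjectStream) Send(msg string) error {
	fmt.Println("sent:", msg)
	return nil
}

// streamObjectWriter embeds the stream, so Send is promoted onto the wrapper:
// s.Send(...) and s.GetObjectStream.Send(...) are the same call.
type streamObjectWriter struct {
	GetObjectStream
}

func main() {
	s := streamObjectWriter{}
	_ = s.Send("chunk") // promoted method, no explicit field selector needed
}
```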
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index e0393e56f7..e699a3779a 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -4,52 +4,42 @@ import (
"context"
"crypto/sha256"
"errors"
- "fmt"
"hash"
- "io"
- "sync"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/status"
+ clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
versionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
)
-var errWrongMessageSeq = errors.New("incorrect message sequence")
-
func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStream) (*getsvc.Prm, error) {
body := req.GetBody()
addrV2 := body.GetAddress()
if addrV2 == nil {
- return nil, errors.New("missing object address")
+ return nil, errMissingObjAddress
}
var addr oid.Address
err := addr.ReadFromV2(*addrV2)
if err != nil {
- return nil, fmt.Errorf("invalid object address: %w", err)
+ return nil, errInvalidObjAddress(err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -65,135 +55,19 @@ func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStre
p.SetObjectWriter(streamWrapper)
if !commonPrm.LocalOnly() {
- var onceResign sync.Once
+ key, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
- var onceHeaderSending sync.Once
- var globalProgress int
+ forwarder := &getRequestForwarder{
+ GlobalProgress: 0,
+ Key: key,
+ Request: req,
+ Stream: streamWrapper,
+ }
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- var err error
-
- key, err := s.keyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
-
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
- writeCurrentVersion(metaHdr)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.GetObject implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
-
- // open stream
- var getStream *rpc.GetResponseReader
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- getStream, err = rpc.GetObject(cli, req, rpcclient.WithContext(stream.Context()))
- return err
- })
- if err != nil {
- return nil, fmt.Errorf("stream opening failed: %w", err)
- }
-
- var (
- headWas bool
- resp = new(objectV2.GetResponse)
- localProgress int
- )
-
- for {
- // receive message from server stream
- err := getStream.Read(resp)
- if err != nil {
- if errors.Is(err, io.EOF) {
- if !headWas {
- return nil, io.ErrUnexpectedEOF
- }
-
- break
- }
-
- internalclient.ReportError(c, err)
- return nil, fmt.Errorf("reading the response failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(resp); err != nil {
- return nil, fmt.Errorf("response verification failed: %w", err)
- }
-
- if err = checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
- return nil, err
- }
-
- switch v := resp.GetBody().GetObjectPart().(type) {
- default:
- return nil, fmt.Errorf("unexpected object part %T", v)
- case *objectV2.GetObjectPartInit:
- if headWas {
- return nil, errWrongMessageSeq
- }
-
- headWas = true
-
- obj := new(objectV2.Object)
-
- obj.SetObjectID(v.GetObjectID())
- obj.SetSignature(v.GetSignature())
- obj.SetHeader(v.GetHeader())
-
- onceHeaderSending.Do(func() {
- err = streamWrapper.WriteHeader(stream.Context(), object.NewFromV2(obj))
- })
- if err != nil {
- return nil, fmt.Errorf("could not write object header in Get forwarder: %w", err)
- }
- case *objectV2.GetObjectPartChunk:
- if !headWas {
- return nil, errWrongMessageSeq
- }
-
- origChunk := v.GetChunk()
-
- chunk := chunkToSend(globalProgress, localProgress, origChunk)
- if len(chunk) == 0 {
- localProgress += len(origChunk)
- continue
- }
-
- if err = streamWrapper.WriteChunk(stream.Context(), chunk); err != nil {
- return nil, fmt.Errorf("could not write object chunk in Get forwarder: %w", err)
- }
-
- localProgress += len(origChunk)
- globalProgress += len(chunk)
- case *objectV2.SplitInfo:
- si := object.NewSplitInfoFromV2(v)
- return nil, object.NewSplitInfoError(si)
- }
- }
-
- return nil, nil
- }))
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode))
}
return p, nil
@@ -204,18 +78,16 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get
addrV2 := body.GetAddress()
if addrV2 == nil {
- return nil, errors.New("missing object address")
+ return nil, errMissingObjAddress
}
var addr oid.Address
err := addr.ReadFromV2(*addrV2)
if err != nil {
- return nil, fmt.Errorf("invalid object address: %w", err)
+ return nil, errInvalidObjAddress(err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -229,112 +101,27 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get
p.WithAddress(addr)
p.WithRawFlag(body.GetRaw())
p.SetChunkWriter(streamWrapper)
- p.SetRange(object.NewRangeFromV2(body.GetRange()))
+ p.SetRange(objectSDK.NewRangeFromV2(body.GetRange()))
err = p.Validate()
if err != nil {
- return nil, fmt.Errorf("request params validation: %w", err)
+ return nil, errRequestParamsValidation(err)
}
if !commonPrm.LocalOnly() {
- var onceResign sync.Once
- var globalProgress int
-
key, err := s.keyStorage.GetKey(nil)
if err != nil {
return nil, err
}
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- var err error
+ forwarder := &getRangeRequestForwarder{
+ GlobalProgress: 0,
+ Key: key,
+ Request: req,
+ Stream: streamWrapper,
+ }
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
- writeCurrentVersion(metaHdr)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.ObjectPayloadRangeData implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
-
- // open stream
- var rangeStream *rpc.ObjectRangeResponseReader
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- rangeStream, err = rpc.GetObjectRange(cli, req, rpcclient.WithContext(stream.Context()))
- return err
- })
- if err != nil {
- return nil, fmt.Errorf("could not create Get payload range stream: %w", err)
- }
-
- resp := new(objectV2.GetRangeResponse)
- var localProgress int
-
- for {
- // receive message from server stream
- err := rangeStream.Read(resp)
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- internalclient.ReportError(c, err)
- return nil, fmt.Errorf("reading the response failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(resp); err != nil {
- return nil, fmt.Errorf("could not verify %T: %w", resp, err)
- }
-
- if err = checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
- return nil, err
- }
-
- switch v := resp.GetBody().GetRangePart().(type) {
- case nil:
- return nil, fmt.Errorf("unexpected range type %T", v)
- case *objectV2.GetRangePartChunk:
- origChunk := v.GetChunk()
-
- chunk := chunkToSend(globalProgress, localProgress, origChunk)
- if len(chunk) == 0 {
- localProgress += len(origChunk)
- continue
- }
-
- if err = streamWrapper.WriteChunk(stream.Context(), chunk); err != nil {
- return nil, fmt.Errorf("could not write object chunk in GetRange forwarder: %w", err)
- }
-
- localProgress += len(origChunk)
- globalProgress += len(chunk)
- case *objectV2.SplitInfo:
- si := object.NewSplitInfoFromV2(v)
-
- return nil, object.NewSplitInfoError(si)
- }
- }
-
- return nil, nil
- }))
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode))
}
return p, nil
@@ -345,14 +132,14 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
addrV2 := body.GetAddress()
if addrV2 == nil {
- return nil, errors.New("missing object address")
+ return nil, errMissingObjAddress
}
var addr oid.Address
err := addr.ReadFromV2(*addrV2)
if err != nil {
- return nil, fmt.Errorf("invalid object address: %w", err)
+ return nil, errInvalidObjAddress(err)
}
commonPrm, err := util.CommonPrmFromV2(req)
@@ -370,23 +157,23 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
ID: tok.ID(),
Owner: tok.Issuer(),
})
- if err != nil && errors.As(err, new(apistatus.SessionTokenNotFound)) {
+ if err != nil && clientSDK.IsErrSessionNotFound(err) {
commonPrm.ForgetTokens()
signerKey, err = s.keyStorage.GetKey(nil)
}
if err != nil {
- return nil, fmt.Errorf("fetching session key: %w", err)
+ return nil, errFetchingSessionKey(err)
}
p.WithCachedSignerKey(signerKey)
}
rngsV2 := body.GetRanges()
- rngs := make([]object.Range, len(rngsV2))
+ rngs := make([]objectSDK.Range, len(rngsV2))
for i := range rngsV2 {
- rngs[i] = *object.NewRangeFromV2(&rngsV2[i])
+ rngs[i] = *objectSDK.NewRangeFromV2(&rngsV2[i])
}
p.SetRangeList(rngs)
@@ -394,11 +181,9 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
switch t := body.GetType(); t {
default:
- return nil, fmt.Errorf("unknown checksum type %v", t)
+ return nil, errUnknownChechsumType(t)
case refs.SHA256:
- p.SetHashGenerator(func() hash.Hash {
- return sha256.New()
- })
+ p.SetHashGenerator(sha256.New)
case refs.TillichZemor:
p.SetHashGenerator(func() hash.Hash {
return tz.New()
@@ -414,7 +199,7 @@ type headResponseWriter struct {
body *objectV2.HeadResponseBody
}
-func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *object.Object) error {
+func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *objectSDK.Object) error {
if w.mainOnly {
w.body.SetHeaderPart(toShortObjectHeader(hdr))
} else {
@@ -424,23 +209,21 @@ func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *object.Object)
return nil
}
-func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp *objectV2.HeadResponse) (*getsvc.HeadPrm, error) {
+func (s *Service) toHeadPrm(req *objectV2.HeadRequest, resp *objectV2.HeadResponse) (*getsvc.HeadPrm, error) {
body := req.GetBody()
addrV2 := body.GetAddress()
if addrV2 == nil {
- return nil, errors.New("missing object address")
+ return nil, errMissingObjAddress
}
var objAddr oid.Address
err := objAddr.ReadFromV2(*addrV2)
if err != nil {
- return nil, fmt.Errorf("invalid object address: %w", err)
+ return nil, errInvalidObjAddress(err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -456,142 +239,27 @@ func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp
body: resp.GetBody(),
})
- if !commonPrm.LocalOnly() {
- var onceResign sync.Once
-
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- var err error
-
- key, err := s.keyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
-
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
- writeCurrentVersion(metaHdr)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.GetObjectHeader implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
-
- // send Head request
- var headResp *objectV2.HeadResponse
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- headResp, err = rpc.HeadObject(cli, req, rpcclient.WithContext(ctx))
- return err
- })
- if err != nil {
- return nil, fmt.Errorf("sending the request failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, headResp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(headResp); err != nil {
- return nil, fmt.Errorf("response verification failed: %w", err)
- }
-
- if err = checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
- return nil, err
- }
-
- var (
- hdr *objectV2.Header
- idSig *refs.Signature
- )
-
- switch v := headResp.GetBody().GetHeaderPart().(type) {
- case nil:
- return nil, fmt.Errorf("unexpected header type %T", v)
- case *objectV2.ShortHeader:
- if !body.GetMainOnly() {
- return nil, fmt.Errorf("wrong header part type: expected %T, received %T",
- (*objectV2.ShortHeader)(nil), (*objectV2.HeaderWithSignature)(nil),
- )
- }
-
- h := v
-
- hdr = new(objectV2.Header)
- hdr.SetPayloadLength(h.GetPayloadLength())
- hdr.SetVersion(h.GetVersion())
- hdr.SetOwnerID(h.GetOwnerID())
- hdr.SetObjectType(h.GetObjectType())
- hdr.SetCreationEpoch(h.GetCreationEpoch())
- hdr.SetPayloadHash(h.GetPayloadHash())
- hdr.SetHomomorphicHash(h.GetHomomorphicHash())
- case *objectV2.HeaderWithSignature:
- if body.GetMainOnly() {
- return nil, fmt.Errorf("wrong header part type: expected %T, received %T",
- (*objectV2.HeaderWithSignature)(nil), (*objectV2.ShortHeader)(nil),
- )
- }
-
- hdrWithSig := v
- if hdrWithSig == nil {
- return nil, errors.New("nil object part")
- }
-
- hdr = hdrWithSig.GetHeader()
- idSig = hdrWithSig.GetSignature()
-
- if idSig == nil {
- // TODO(@cthulhu-rider): #1387 use "const" error
- return nil, errors.New("missing signature")
- }
-
- binID, err := objAddr.Object().Marshal()
- if err != nil {
- return nil, fmt.Errorf("marshal ID: %w", err)
- }
-
- var sig frostfscrypto.Signature
- if err := sig.ReadFromV2(*idSig); err != nil {
- return nil, fmt.Errorf("can't read signature: %w", err)
- }
-
- if !sig.Verify(binID) {
- return nil, errors.New("invalid object ID signature")
- }
- case *objectV2.SplitInfo:
- si := object.NewSplitInfoFromV2(v)
-
- return nil, object.NewSplitInfoError(si)
- }
-
- objv2 := new(objectV2.Object)
- objv2.SetHeader(hdr)
- objv2.SetSignature(idSig)
-
- obj := object.NewFromV2(objv2)
- obj.SetID(objAddr.Object())
-
- // convert the object
- return obj, nil
- }))
+ if commonPrm.LocalOnly() {
+ return p, nil
}
+ key, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ forwarder := &headRequestForwarder{
+ Request: req,
+ ObjectAddr: objAddr,
+ Key: key,
+ }
+
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode))
+
return p, nil
}
-func splitInfoResponse(info *object.SplitInfo) *objectV2.GetResponse {
+func splitInfoResponse(info *objectSDK.SplitInfo) *objectV2.GetResponse {
resp := new(objectV2.GetResponse)
body := new(objectV2.GetResponseBody)
@@ -602,7 +270,18 @@ func splitInfoResponse(info *object.SplitInfo) *objectV2.GetResponse {
return resp
}
-func splitInfoRangeResponse(info *object.SplitInfo) *objectV2.GetRangeResponse {
+func ecInfoResponse(info *objectSDK.ECInfo) *objectV2.GetResponse {
+ resp := new(objectV2.GetResponse)
+
+ body := new(objectV2.GetResponseBody)
+ resp.SetBody(body)
+
+ body.SetObjectPart(info.ToV2())
+
+ return resp
+}
+
+func splitInfoRangeResponse(info *objectSDK.SplitInfo) *objectV2.GetRangeResponse {
resp := new(objectV2.GetRangeResponse)
body := new(objectV2.GetRangeResponseBody)
@@ -613,7 +292,22 @@ func splitInfoRangeResponse(info *object.SplitInfo) *objectV2.GetRangeResponse {
return resp
}
-func setSplitInfoHeadResponse(info *object.SplitInfo, resp *objectV2.HeadResponse) {
+func ecInfoRangeResponse(info *objectSDK.ECInfo) *objectV2.GetRangeResponse {
+ resp := new(objectV2.GetRangeResponse)
+
+ body := new(objectV2.GetRangeResponseBody)
+ resp.SetBody(body)
+
+ body.SetRangePart(info.ToV2())
+
+ return resp
+}
+
+func setSplitInfoHeadResponse(info *objectSDK.SplitInfo, resp *objectV2.HeadResponse) {
+ resp.GetBody().SetHeaderPart(info.ToV2())
+}
+
+func setECInfoHeadResponse(info *objectSDK.ECInfo, resp *objectV2.HeadResponse) {
resp.GetBody().SetHeaderPart(info.ToV2())
}
@@ -629,7 +323,7 @@ func toHashResponse(typ refs.ChecksumType, res *getsvc.RangeHashRes) *objectV2.G
return resp
}
-func toFullObjectHeader(hdr *object.Object) objectV2.GetHeaderPart {
+func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
obj := hdr.ToV2()
hs := new(objectV2.HeaderWithSignature)
@@ -639,7 +333,7 @@ func toFullObjectHeader(hdr *object.Object) objectV2.GetHeaderPart {
return hs
}
-func toShortObjectHeader(hdr *object.Object) objectV2.GetHeaderPart {
+func toShortObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
hdrV2 := hdr.ToV2().GetHeader()
sh := new(objectV2.ShortHeader)
@@ -654,29 +348,30 @@ func toShortObjectHeader(hdr *object.Object) objectV2.GetHeaderPart {
return sh
}
-func groupAddressRequestForwarder(f func(network.Address, client.MultiAddressClient, []byte) (*object.Object, error)) getsvc.RequestForwarder {
- return func(info client.NodeInfo, c client.MultiAddressClient) (*object.Object, error) {
+func groupAddressRequestForwarder(f func(context.Context, network.Address, client.MultiAddressClient, []byte) (*objectSDK.Object, error)) getsvc.RequestForwarder {
+ return func(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) (*objectSDK.Object, error) {
var (
firstErr error
- res *object.Object
+ res *objectSDK.Object
key = info.PublicKey()
)
info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
var err error
+ res, err = f(ctx, addr, c, key)
- defer func() {
- stop = err == nil
+ // non-status logic errors that the SDK client can return
+ // (split info, EC info) should not be treated
+ // as connection errors
+ var siErr *objectSDK.SplitInfoError
+ var eiErr *objectSDK.ECInfoError
- if stop || firstErr == nil {
- firstErr = err
- }
+ stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
- // would be nice to log otherwise
- }()
-
- res, err = f(addr, c, key)
+ if stop || firstErr == nil {
+ firstErr = err
+ }
return
})
@@ -715,3 +410,20 @@ func chunkToSend(global, local int, chunk []byte) []byte {
return chunk[global-local:]
}
+
+type apiResponse interface {
+ GetMetaHeader() *session.ResponseMetaHeader
+ GetVerificationHeader() *session.ResponseVerificationHeader
+}
+
+func verifyResponse(resp apiResponse, pubkey []byte) error {
+ if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
+ return err
+ }
+
+ if err := signature.VerifyServiceMessage(resp); err != nil {
+ return errResponseVerificationFailed(err)
+ }
+
+ return checkStatus(resp.GetMetaHeader().GetStatus())
+}
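The extracted forwarders still rely on chunkToSend, visible at the end of the hunk above, to deduplicate payload bytes when a request is retried against another address: global counts bytes already delivered to the client, local counts bytes replayed by the current upstream. A sketch consistent with the slice expression shown above; the exact guard conditions in the repository may differ:

```go
package main

import "fmt"

// chunkToSend trims the part of a replayed chunk that the client has already
// received. global is the number of payload bytes delivered across all
// addresses; local is the number of bytes the current upstream has produced.
func chunkToSend(global, local int, chunk []byte) []byte {
	if local+len(chunk) <= global {
		return nil // the whole chunk was already sent earlier
	}
	if local >= global {
		return chunk // nothing to skip
	}
	return chunk[global-local:]
}

func main() {
	// 5 bytes already reached the client; the retried stream replays from
	// offset 0, so the first chunk is partially trimmed.
	fmt.Printf("%q\n", chunkToSend(5, 0, []byte("hello, world"))) // ", world"
	fmt.Printf("%q\n", chunkToSend(12, 12, []byte("!")))          // "!"
}
```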
diff --git a/pkg/services/object/get/writer.go b/pkg/services/object/get/writer.go
new file mode 100644
index 0000000000..3aa4d66ac4
--- /dev/null
+++ b/pkg/services/object/get/writer.go
@@ -0,0 +1,96 @@
+package getsvc
+
+import (
+ "context"
+ "io"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+)
+
+// ChunkWriter is an interface of target component
+// to write payload chunk.
+type ChunkWriter interface {
+ WriteChunk(context.Context, []byte) error
+}
+
+// HeaderWriter is an interface of target component
+// to write object header.
+type HeaderWriter interface {
+ WriteHeader(context.Context, *objectSDK.Object) error
+}
+
+// ObjectWriter is an interface of target component to write object.
+type ObjectWriter interface {
+ HeaderWriter
+ ChunkWriter
+}
+
+type SimpleObjectWriter struct {
+ obj *objectSDK.Object
+
+ pld []byte
+}
+
+type partWriter struct {
+ ObjectWriter
+
+ headWriter HeaderWriter
+
+ chunkWriter ChunkWriter
+}
+
+type hasherWrapper struct {
+ hash io.Writer
+}
+
+func NewSimpleObjectWriter() *SimpleObjectWriter {
+ return &SimpleObjectWriter{
+ obj: objectSDK.New(),
+ }
+}
+
+func (s *SimpleObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Object) error {
+ s.obj = obj
+ s.pld = make([]byte, 0, obj.PayloadSize())
+ return nil
+}
+
+func (s *SimpleObjectWriter) WriteChunk(_ context.Context, p []byte) error {
+ s.pld = append(s.pld, p...)
+ return nil
+}
+
+func (s *SimpleObjectWriter) Object() *objectSDK.Object {
+ if len(s.pld) > 0 {
+ s.obj.SetPayload(s.pld)
+ }
+
+ return s.obj
+}
+
+func (w *partWriter) WriteChunk(ctx context.Context, p []byte) error {
+ return w.chunkWriter.WriteChunk(ctx, p)
+}
+
+func (w *partWriter) WriteHeader(ctx context.Context, o *objectSDK.Object) error {
+ return w.headWriter.WriteHeader(ctx, o)
+}
+
+func (h *hasherWrapper) WriteChunk(_ context.Context, p []byte) error {
+ _, err := h.hash.Write(p)
+ return err
+}
+
+type payloadWriter struct {
+ origin ChunkWriter
+ obj *objectSDK.Object
+}
+
+func (w *payloadWriter) WriteChunk(ctx context.Context, p []byte) error {
+ return w.origin.WriteChunk(ctx, p)
+}
+
+func (w *payloadWriter) WriteHeader(_ context.Context, o *objectSDK.Object) error {
+ w.obj = o
+ return nil
+}
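SimpleObjectWriter is the in-memory ObjectWriter for callers that need the whole object at once rather than a stream. A usage sketch, assuming the packages from this diff are importable:

```go
package main

import (
	"context"
	"fmt"

	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

func main() {
	w := getsvc.NewSimpleObjectWriter()

	hdr := objectSDK.New()
	ctx := context.Background()

	// Header first, then payload chunks in order; Object() glues them together.
	_ = w.WriteHeader(ctx, hdr)
	_ = w.WriteChunk(ctx, []byte("hello, "))
	_ = w.WriteChunk(ctx, []byte("world"))

	obj := w.Object()
	fmt.Println(string(obj.Payload())) // hello, world
}
```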
diff --git a/pkg/services/object/head/prm.go b/pkg/services/object/head/prm.go
deleted file mode 100644
index 5566e48fe1..0000000000
--- a/pkg/services/object/head/prm.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package headsvc
-
-import (
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type Prm struct {
- addr oid.Address
-}
-
-func (p *Prm) WithAddress(v oid.Address) *Prm {
- if p != nil {
- p.addr = v
- }
-
- return p
-}
diff --git a/pkg/services/object/head/remote.go b/pkg/services/object/head/remote.go
deleted file mode 100644
index f50c3422ac..0000000000
--- a/pkg/services/object/head/remote.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package headsvc
-
-import (
- "context"
- "errors"
- "fmt"
-
- clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type ClientConstructor interface {
- Get(clientcore.NodeInfo) (clientcore.Client, error)
-}
-
-// RemoteHeader represents utility for getting
-// the object header from a remote host.
-type RemoteHeader struct {
- keyStorage *util.KeyStorage
-
- clientCache ClientConstructor
-}
-
-// RemoteHeadPrm groups remote header operation parameters.
-type RemoteHeadPrm struct {
- commonHeadPrm *Prm
-
- node netmap.NodeInfo
-}
-
-const remoteOpTTL = 1
-
-var ErrNotFound = errors.New("object header not found")
-
-// NewRemoteHeader creates, initializes and returns new RemoteHeader instance.
-func NewRemoteHeader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteHeader {
- return &RemoteHeader{
- keyStorage: keyStorage,
- clientCache: cache,
- }
-}
-
-// WithNodeInfo sets information about the remote node.
-func (p *RemoteHeadPrm) WithNodeInfo(v netmap.NodeInfo) *RemoteHeadPrm {
- if p != nil {
- p.node = v
- }
-
- return p
-}
-
-// WithObjectAddress sets object address.
-func (p *RemoteHeadPrm) WithObjectAddress(v oid.Address) *RemoteHeadPrm {
- if p != nil {
- p.commonHeadPrm = new(Prm).WithAddress(v)
- }
-
- return p
-}
-
-// Head requests object header from the remote node.
-func (h *RemoteHeader) Head(ctx context.Context, prm *RemoteHeadPrm) (*object.Object, error) {
- key, err := h.keyStorage.GetKey(nil)
- if err != nil {
- return nil, fmt.Errorf("(%T) could not receive private key: %w", h, err)
- }
-
- var info clientcore.NodeInfo
-
- err = clientcore.NodeInfoFromRawNetmapElement(&info, netmapCore.Node(prm.node))
- if err != nil {
- return nil, fmt.Errorf("parse client node info: %w", err)
- }
-
- c, err := h.clientCache.Get(info)
- if err != nil {
- return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", h, info.AddressGroup(), err)
- }
-
- var headPrm internalclient.HeadObjectPrm
-
- headPrm.SetContext(ctx)
- headPrm.SetClient(c)
- headPrm.SetPrivateKey(key)
- headPrm.SetAddress(prm.commonHeadPrm.addr)
- headPrm.SetTTL(remoteOpTTL)
-
- res, err := internalclient.HeadObject(headPrm)
- if err != nil {
- return nil, fmt.Errorf("(%T) could not head object in %s: %w", h, info.AddressGroup(), err)
- }
-
- return res.Header(), nil
-}
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index 8f2483bc09..3e88326401 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -7,13 +7,16 @@ import (
"errors"
"fmt"
"io"
+ "strconv"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
@@ -21,8 +24,6 @@ import (
type commonPrm struct {
cli coreclient.Client
- ctx context.Context
-
key *ecdsa.PrivateKey
tokenSession *session.Object
@@ -32,6 +33,8 @@ type commonPrm struct {
local bool
xHeaders []string
+
+ netmapEpoch uint64
}
// SetClient sets base client for FrostFS API communication.
@@ -41,13 +44,6 @@ func (x *commonPrm) SetClient(cli coreclient.Client) {
x.cli = cli
}
-// SetContext sets context.Context for network communication.
-//
-// Required parameter.
-func (x *commonPrm) SetContext(ctx context.Context) {
- x.ctx = ctx
-}
-
// SetPrivateKey sets private key to sign the request(s).
//
// Required parameter.
@@ -81,22 +77,30 @@ func (x *commonPrm) SetXHeaders(hs []string) {
x.xHeaders = hs
}
+func (x *commonPrm) calculateXHeaders() []string {
+ hs := x.xHeaders
+ if x.netmapEpoch != 0 {
+ hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
+ }
+ return hs
+}
+
type readPrmCommon struct {
commonPrm
}
-// SetNetmapEpoch sets the epoch number to be used to locate the object.
+// SetNetmapEpoch sets the epoch number to be used to locate the object.
//
// By default current epoch on the server will be used.
-func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
- // FIXME: (neofs-node#1194) not supported by client
+func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) {
+ x.netmapEpoch = epoch
}
// GetObjectPrm groups parameters of GetObject operation.
type GetObjectPrm struct {
readPrmCommon
- cliPrm client.PrmObjectGet
+ ClientParams client.PrmObjectGet
obj oid.ID
}
@@ -105,7 +109,7 @@ type GetObjectPrm struct {
//
// By default request will not be raw.
func (x *GetObjectPrm) SetRawFlag() {
- x.cliPrm.MarkRaw()
+ x.ClientParams.Raw = true
}
// SetAddress sets object address.
@@ -113,17 +117,19 @@ func (x *GetObjectPrm) SetRawFlag() {
// Required parameter.
func (x *GetObjectPrm) SetAddress(addr oid.Address) {
x.obj = addr.Object()
- x.cliPrm.FromContainer(addr.Container())
- x.cliPrm.ByID(x.obj)
+ cnr := addr.Container()
+
+ x.ClientParams.ContainerID = &cnr
+ x.ClientParams.ObjectID = &x.obj
}
// GetObjectRes groups the resulting values of GetObject operation.
type GetObjectRes struct {
- obj *object.Object
+ obj *objectSDK.Object
}
-// Object returns requested object.
-func (x GetObjectRes) Object() *object.Object {
+// Object returns the requested object.
+func (x GetObjectRes) Object() *objectSDK.Object {
return x.obj
}
@@ -133,37 +139,29 @@ func (x GetObjectRes) Object() *object.Object {
//
// Returns any error which prevented the operation from completing correctly in error return.
// Returns:
-// - error of type *object.SplitInfoError if object raw flag is set and requested object is virtual;
+// - error of type *objectSDK.SplitInfoError if object raw flag is set and requested object is virtual;
// - error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed.
//
-// GetObject ignores the provided session if it is not related to the requested object.
-func GetObject(prm GetObjectPrm) (*GetObjectRes, error) {
+// GetObject ignores the provided session if it is not related to the requested object.
+func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
// here we ignore session if it is opened for other object since such
// request will almost definitely fail. The case can occur, for example,
// when session is bound to the parent object and child object is requested.
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
- prm.cliPrm.WithinSession(*prm.tokenSession)
+ prm.ClientParams.Session = prm.tokenSession
}
- if prm.tokenBearer != nil {
- prm.cliPrm.WithBearerToken(*prm.tokenBearer)
- }
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.BearerToken = prm.tokenBearer
+ prm.ClientParams.Local = prm.local
+ prm.ClientParams.Key = prm.key
- if prm.local {
- prm.cliPrm.MarkLocal()
- }
-
- prm.cliPrm.WithXHeaders(prm.xHeaders...)
- if prm.key != nil {
- prm.cliPrm.UseKey(*prm.key)
- }
-
- rdr, err := prm.cli.ObjectGetInit(prm.ctx, prm.cliPrm)
+ rdr, err := prm.cli.ObjectGetInit(ctx, prm.ClientParams)
if err != nil {
return nil, fmt.Errorf("init object reading: %w", err)
}
- var obj object.Object
+ var obj objectSDK.Object
if !rdr.ReadHeader(&obj) {
res, err := rdr.Close()
@@ -195,7 +193,7 @@ func GetObject(prm GetObjectPrm) (*GetObjectRes, error) {
type HeadObjectPrm struct {
readPrmCommon
- cliPrm client.PrmObjectHead
+ ClientParams client.PrmObjectHead
obj oid.ID
}
@@ -204,7 +202,7 @@ type HeadObjectPrm struct {
//
// By default request will not be raw.
func (x *HeadObjectPrm) SetRawFlag() {
- x.cliPrm.MarkRaw()
+ x.ClientParams.Raw = true
}
// SetAddress sets object address.
@@ -212,48 +210,44 @@ func (x *HeadObjectPrm) SetRawFlag() {
// Required parameter.
func (x *HeadObjectPrm) SetAddress(addr oid.Address) {
x.obj = addr.Object()
- x.cliPrm.FromContainer(addr.Container())
- x.cliPrm.ByID(x.obj)
+ cnr := addr.Container()
+
+ x.ClientParams.ContainerID = &cnr
+ x.ClientParams.ObjectID = &x.obj
}
// HeadObjectRes groups the resulting values of HeadObject operation.
type HeadObjectRes struct {
- hdr *object.Object
+ hdr *objectSDK.Object
}
// Header returns requested object header.
-func (x HeadObjectRes) Header() *object.Object {
+func (x HeadObjectRes) Header() *objectSDK.Object {
return x.hdr
}
// HeadObject reads object header by address.
//
-// Client, context and key must be set.
+// Client and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
// Returns:
//
-// error of type *object.SplitInfoError if object raw flag is set and requested object is virtual;
+// error of type *objectSDK.SplitInfoError if object raw flag is set and requested object is virtual;
// error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed.
//
-// HeadObject ignores the provided session if it is not related to the requested object.
-func HeadObject(prm HeadObjectPrm) (*HeadObjectRes, error) {
- if prm.local {
- prm.cliPrm.MarkLocal()
- }
-
+// HeadObject ignores the provided session if it is not related to the requested object.
+func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
// see details in same statement of GetObject
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
- prm.cliPrm.WithinSession(*prm.tokenSession)
+ prm.ClientParams.Session = prm.tokenSession
}
- if prm.tokenBearer != nil {
- prm.cliPrm.WithBearerToken(*prm.tokenBearer)
- }
+ prm.ClientParams.BearerToken = prm.tokenBearer
+ prm.ClientParams.Local = prm.local
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
- prm.cliPrm.WithXHeaders(prm.xHeaders...)
-
- cliRes, err := prm.cli.ObjectHead(prm.ctx, prm.cliPrm)
+ cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
if err == nil {
// pull out an error from status
err = apistatus.ErrFromStatus(cliRes.Status())
@@ -263,7 +257,7 @@ func HeadObject(prm HeadObjectPrm) (*HeadObjectRes, error) {
return nil, fmt.Errorf("read object header from FrostFS: %w", err)
}
- var hdr object.Object
+ var hdr objectSDK.Object
if !cliRes.ReadHeader(&hdr) {
return nil, errors.New("missing object header in the response")
@@ -280,7 +274,7 @@ type PayloadRangePrm struct {
ln uint64
- cliPrm client.PrmObjectRange
+ ClientParams client.PrmObjectRange
obj oid.ID
}
@@ -289,7 +283,7 @@ type PayloadRangePrm struct {
//
// By default request will not be raw.
func (x *PayloadRangePrm) SetRawFlag() {
- x.cliPrm.MarkRaw()
+ x.ClientParams.Raw = true
}
// SetAddress sets object address.
@@ -297,15 +291,17 @@ func (x *PayloadRangePrm) SetRawFlag() {
// Required parameter.
func (x *PayloadRangePrm) SetAddress(addr oid.Address) {
x.obj = addr.Object()
- x.cliPrm.FromContainer(addr.Container())
- x.cliPrm.ByID(x.obj)
+ cnr := addr.Container()
+
+ x.ClientParams.ContainerID = &cnr
+ x.ClientParams.ObjectID = &x.obj
}
// SetRange sets the range of the object payload to be read.
//
// Required parameter.
-func (x *PayloadRangePrm) SetRange(rng *object.Range) {
- x.cliPrm.SetOffset(rng.GetOffset())
+func (x *PayloadRangePrm) SetRange(rng *objectSDK.Range) {
+ x.ClientParams.Offset = rng.GetOffset()
x.ln = rng.GetLength()
}
@@ -326,34 +322,28 @@ const maxInitialBufferSize = 1024 * 1024 // 1 MiB
// PayloadRange reads object payload range by address.
//
-// Client, context and key must be set.
+// Client and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
// Returns:
//
-// error of type *object.SplitInfoError if object raw flag is set and requested object is virtual;
+// error of type *objectSDK.SplitInfoError if object raw flag is set and requested object is virtual;
// error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed;
// error of type *apistatus.ObjectOutOfRange if the requested range is too big.
//
-// PayloadRange ignores the provided session if it is not related to the requested object.
-func PayloadRange(prm PayloadRangePrm) (*PayloadRangeRes, error) {
- if prm.local {
- prm.cliPrm.MarkLocal()
- }
-
+// PayloadRange ignores the provided session if it is not related to the requested object.
+func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
// see details in same statement of GetObject
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
- prm.cliPrm.WithinSession(*prm.tokenSession)
+ prm.ClientParams.Session = prm.tokenSession
}
- if prm.tokenBearer != nil {
- prm.cliPrm.WithBearerToken(*prm.tokenBearer)
- }
+ prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.BearerToken = prm.tokenBearer
+ prm.ClientParams.Local = prm.local
+ prm.ClientParams.Length = prm.ln
- prm.cliPrm.SetLength(prm.ln)
- prm.cliPrm.WithXHeaders(prm.xHeaders...)
-
- rdr, err := prm.cli.ObjectRangeInit(prm.ctx, prm.cliPrm)
+ rdr, err := prm.cli.ObjectRangeInit(ctx, prm.ClientParams)
if err != nil {
return nil, fmt.Errorf("init payload reading: %w", err)
}
@@ -365,12 +355,9 @@ func PayloadRange(prm PayloadRangePrm) (*PayloadRangeRes, error) {
return nil, new(apistatus.ObjectOutOfRange)
}
- ln := prm.ln
- if ln > maxInitialBufferSize {
- ln = maxInitialBufferSize
- }
+ ln := min(prm.ln, maxInitialBufferSize)
- w := bytes.NewBuffer(make([]byte, ln))
+ w := bytes.NewBuffer(make([]byte, 0, ln))
_, err = io.CopyN(w, rdr, int64(prm.ln))
if err != nil {
return nil, fmt.Errorf("read payload: %w", err)
@@ -385,13 +372,13 @@ func PayloadRange(prm PayloadRangePrm) (*PayloadRangeRes, error) {
type PutObjectPrm struct {
commonPrm
- obj *object.Object
+ obj *objectSDK.Object
}
// SetObject sets object to be stored.
//
// Required parameter.
-func (x *PutObjectPrm) SetObject(obj *object.Object) {
+func (x *PutObjectPrm) SetObject(obj *objectSDK.Object) {
x.obj = obj
}
@@ -400,45 +387,38 @@ type PutObjectRes struct {
id oid.ID
}
-// ID returns identifier of the stored object.
+// ID returns the identifier of the stored object.
func (x PutObjectRes) ID() oid.ID {
return x.id
}
// PutObject saves the object in local storage of the remote node.
//
-// Client, context and key must be set.
+// Client and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
-func PutObject(prm PutObjectPrm) (*PutObjectRes, error) {
- var prmCli client.PrmObjectPutInit
+func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObject")
+ defer span.End()
- prmCli.MarkLocal()
-
- if prm.key != nil {
- prmCli.UseKey(*prm.key)
+ prmCli := client.PrmObjectPutInit{
+ XHeaders: prm.calculateXHeaders(),
+ BearerToken: prm.tokenBearer,
+ Session: prm.tokenSession,
+ Local: true,
+ Key: prm.key,
}
- if prm.tokenSession != nil {
- prmCli.WithinSession(*prm.tokenSession)
- }
-
- if prm.tokenBearer != nil {
- prmCli.WithBearerToken(*prm.tokenBearer)
- }
-
- prmCli.WithXHeaders(prm.xHeaders...)
-
- w, err := prm.cli.ObjectPutInit(prm.ctx, prmCli)
+ w, err := prm.cli.ObjectPutInit(ctx, prmCli)
if err != nil {
return nil, fmt.Errorf("init object writing on client: %w", err)
}
- if w.WriteHeader(*prm.obj) {
- w.WritePayloadChunk(prm.obj.Payload())
+ if w.WriteHeader(ctx, *prm.obj) {
+ w.WritePayloadChunk(ctx, prm.obj.Payload())
}
- cliRes, err := w.Close()
+ cliRes, err := w.Close(ctx)
if err == nil {
err = apistatus.ErrFromStatus(cliRes.Status())
} else {
@@ -454,6 +434,44 @@ func PutObject(prm PutObjectPrm) (*PutObjectRes, error) {
}, nil
}
+// PutObjectSingle saves the object in local storage of the remote node with PutSingle RPC.
+//
+// Client and key must be set.
+//
+// Returns any error which prevented the operation from completing correctly in error return.
+func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObjectSingle")
+ defer span.End()
+
+ objID, isSet := prm.obj.ID()
+ if !isSet {
+ return nil, errors.New("missing object id")
+ }
+
+ prmCli := client.PrmObjectPutSingle{
+ XHeaders: prm.calculateXHeaders(),
+ BearerToken: prm.tokenBearer,
+ Session: prm.tokenSession,
+ Local: true,
+ Key: prm.key,
+ Object: prm.obj,
+ }
+
+ res, err := prm.cli.ObjectPutSingle(ctx, prmCli)
+ if err != nil {
+ ReportError(prm.cli, err)
+ return nil, fmt.Errorf("put single object on client: %w", err)
+ }
+
+ if err = apistatus.ErrFromStatus(res.Status()); err != nil {
+ return nil, fmt.Errorf("put single object via client: %w", err)
+ }
+
+ return &PutObjectRes{
+ id: objID,
+ }, nil
+}
+
// SearchObjectsPrm groups parameters of SearchObjects operation.
type SearchObjectsPrm struct {
readPrmCommon
@@ -465,12 +483,12 @@ type SearchObjectsPrm struct {
//
// Required parameter.
func (x *SearchObjectsPrm) SetContainerID(id cid.ID) {
- x.cliPrm.InContainer(id)
+ x.cliPrm.ContainerID = &id
}
// SetFilters sets search filters.
-func (x *SearchObjectsPrm) SetFilters(fs object.SearchFilters) {
- x.cliPrm.SetFilters(fs)
+func (x *SearchObjectsPrm) SetFilters(fs objectSDK.SearchFilters) {
+ x.cliPrm.Filters = fs
}
// SearchObjectsRes groups the resulting values of SearchObjects operation.
@@ -486,26 +504,14 @@ func (x SearchObjectsRes) IDList() []oid.ID {
// SearchObjects selects objects from container which match the filters.
//
// Returns any error which prevented the operation from completing correctly in error return.
-func SearchObjects(prm SearchObjectsPrm) (*SearchObjectsRes, error) {
- if prm.local {
- prm.cliPrm.MarkLocal()
- }
+func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) {
+ prm.cliPrm.Local = prm.local
+ prm.cliPrm.Session = prm.tokenSession
+ prm.cliPrm.BearerToken = prm.tokenBearer
+ prm.cliPrm.XHeaders = prm.calculateXHeaders()
+ prm.cliPrm.Key = prm.key
- if prm.tokenSession != nil {
- prm.cliPrm.WithinSession(*prm.tokenSession)
- }
-
- if prm.tokenBearer != nil {
- prm.cliPrm.WithBearerToken(*prm.tokenBearer)
- }
-
- prm.cliPrm.WithXHeaders(prm.xHeaders...)
-
- if prm.key != nil {
- prm.cliPrm.UseKey(*prm.key)
- }
-
- rdr, err := prm.cli.ObjectSearchInit(prm.ctx, prm.cliPrm)
+ rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm)
if err != nil {
return nil, fmt.Errorf("init object searching in client: %w", err)
}
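The internal client now exposes parameters as plain struct state and takes the context at the call site instead of storing it in commonPrm. A sketch of the resulting call shape for Head, assuming the setters keep the signatures shown above:

```go
package headexample

import (
	"context"
	"crypto/ecdsa"

	coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// headObject shows the post-refactor call shape: setters configure the
// parameters, and the context travels with the call, not inside commonPrm.
func headObject(ctx context.Context, cli coreclient.Client, key *ecdsa.PrivateKey, addr oid.Address) (*objectSDK.Object, error) {
	var prm internalclient.HeadObjectPrm

	prm.SetClient(cli)
	prm.SetPrivateKey(key)
	prm.SetAddress(addr)

	res, err := internalclient.HeadObject(ctx, prm) // ctx is explicit now
	if err != nil {
		return nil, err
	}
	return res.Header(), nil
}
```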
diff --git a/pkg/services/object/internal/key.go b/pkg/services/object/internal/key.go
index 7ab5f082ca..1e0a7ef903 100644
--- a/pkg/services/object/internal/key.go
+++ b/pkg/services/object/internal/key.go
@@ -3,14 +3,15 @@ package internal
import (
"bytes"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
// VerifyResponseKeyV2 checks if response is signed with expected key. Returns client.ErrWrongPublicKey if not.
func VerifyResponseKeyV2(expectedKey []byte, resp interface {
GetVerificationHeader() *session.ResponseVerificationHeader
-}) error {
+},
+) error {
if !bytes.Equal(resp.GetVerificationHeader().GetBodySignature().GetKey(), expectedKey) {
return client.ErrWrongPublicKey
}
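VerifyResponseKeyV2 accepts an anonymous interface, so any response type with the right accessor satisfies it without declaring a named interface in a shared package. A self-contained illustration of the idiom:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// verifyKey takes an anonymous interface, like VerifyResponseKeyV2 above:
// any value with a matching method satisfies it, no named type required.
func verifyKey(expected []byte, resp interface{ Key() []byte }) error {
	if !bytes.Equal(resp.Key(), expected) {
		return errors.New("wrong public key")
	}
	return nil
}

type response struct{ key []byte }

func (r response) Key() []byte { return r.key }

func main() {
	fmt.Println(verifyKey([]byte{1}, response{key: []byte{1}})) // <nil>
	fmt.Println(verifyKey([]byte{1}, response{key: []byte{2}})) // wrong public key
}
```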
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 9f15e834af..6a6ee0f0f3 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -4,8 +4,9 @@ import (
"context"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -27,25 +28,15 @@ type (
start time.Time
}
+ patchStreamMetric struct {
+ stream PatchObjectStream
+ metrics MetricRegister
+ start time.Time
+ }
+
MetricRegister interface {
- IncGetReqCounter(success bool)
- IncPutReqCounter(success bool)
- IncHeadReqCounter(success bool)
- IncSearchReqCounter(success bool)
- IncDeleteReqCounter(success bool)
- IncRangeReqCounter(success bool)
- IncRangeHashReqCounter(success bool)
-
- AddGetReqDuration(time.Duration)
- AddPutReqDuration(time.Duration)
- AddHeadReqDuration(time.Duration)
- AddSearchReqDuration(time.Duration)
- AddDeleteReqDuration(time.Duration)
- AddRangeReqDuration(time.Duration)
- AddRangeHashReqDuration(time.Duration)
-
- AddPutPayload(int)
- AddGetPayload(int)
+ AddRequestDuration(string, time.Duration, bool, string)
+ AddPayloadSize(string, int)
}
)
@@ -61,8 +52,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
if m.enabled {
t := time.Now()
defer func() {
- m.metrics.IncGetReqCounter(err == nil)
- m.metrics.AddGetReqDuration(time.Since(t))
+ m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
}()
err = m.next.Get(req, &getStreamMetric{
ServerStream: stream,
@@ -93,14 +83,47 @@ func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
return m.next.Put(ctx)
}
+func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
+ if m.enabled {
+ t := time.Now()
+
+ stream, err := m.next.Patch(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return &patchStreamMetric{
+ stream: stream,
+ metrics: m.metrics,
+ start: t,
+ }, nil
+ }
+ return m.next.Patch(ctx)
+}
+
+func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ if m.enabled {
+ t := time.Now()
+
+ res, err := m.next.PutSingle(ctx, request)
+
+ m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ if err == nil {
+ m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload()))
+ }
+
+ return res, err
+ }
+ return m.next.PutSingle(ctx, request)
+}
+
func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
if m.enabled {
t := time.Now()
res, err := m.next.Head(ctx, request)
- m.metrics.IncHeadReqCounter(err == nil)
- m.metrics.AddHeadReqDuration(time.Since(t))
+ m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -113,8 +136,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream)
err := m.next.Search(req, stream)
- m.metrics.IncSearchReqCounter(err == nil)
- m.metrics.AddSearchReqDuration(time.Since(t))
+ m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
return err
}
@@ -127,8 +149,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque
res, err := m.next.Delete(ctx, request)
- m.metrics.IncDeleteReqCounter(err == nil)
- m.metrics.AddDeleteReqDuration(time.Since(t))
+ m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
return m.next.Delete(ctx, request)
@@ -140,8 +161,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR
err := m.next.GetRange(req, stream)
- m.metrics.IncRangeReqCounter(err == nil)
- m.metrics.AddRangeReqDuration(time.Since(t))
+ m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
return err
}
@@ -154,8 +174,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa
res, err := m.next.GetRangeHash(ctx, request)
- m.metrics.IncRangeHashReqCounter(err == nil)
- m.metrics.AddRangeHashReqDuration(time.Since(t))
+ m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
@@ -173,26 +192,39 @@ func (m *MetricCollector) Disable() {
func (s getStreamMetric) Send(resp *object.GetResponse) error {
chunk, ok := resp.GetBody().GetObjectPart().(*object.GetObjectPartChunk)
if ok {
- s.metrics.AddGetPayload(len(chunk.GetChunk()))
+ s.metrics.AddPayloadSize("Get", len(chunk.GetChunk()))
}
return s.stream.Send(resp)
}
-func (s putStreamMetric) Send(req *object.PutRequest) error {
+func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error {
chunk, ok := req.GetBody().GetObjectPart().(*object.PutObjectPartChunk)
if ok {
- s.metrics.AddPutPayload(len(chunk.GetChunk()))
+ s.metrics.AddPayloadSize("Put", len(chunk.GetChunk()))
}
- return s.stream.Send(req)
+ return s.stream.Send(ctx, req)
}
-func (s putStreamMetric) CloseAndRecv() (*object.PutResponse, error) {
- res, err := s.stream.CloseAndRecv()
+func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.IncPutReqCounter(err == nil)
- s.metrics.AddPutReqDuration(time.Since(s.start))
+ s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
+
+ return res, err
+}
+
+func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) error {
+ s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().GetChunk()))
+
+ return s.stream.Send(ctx, req)
+}
+
+func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
+ res, err := s.stream.CloseAndRecv(ctx)
+
+ s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
return res, err
}
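The metrics interface collapses the per-RPC Inc*/Add* pairs into two generic hooks keyed by the method name and, for durations, the IO tag taken from the request context. A self-contained sketch of a MetricRegister implementation; the tag value here is illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// MetricRegister is reduced to two generic hooks; the method name becomes a
// label instead of a dedicated counter/duration pair per RPC.
type MetricRegister interface {
	AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
	AddPayloadSize(method string, size int)
}

// logRegister is a toy implementation; a real one would feed labeled
// Prometheus vectors.
type logRegister struct{}

func (logRegister) AddRequestDuration(m string, d time.Duration, ok bool, tag string) {
	fmt.Printf("method=%s dur=%s success=%t io_tag=%q\n", m, d, ok, tag)
}

func (logRegister) AddPayloadSize(m string, size int) {
	fmt.Printf("method=%s payload=%d\n", m, size)
}

func main() {
	var reg MetricRegister = logRegister{}

	t := time.Now()
	// ... handle the request ...
	reg.AddRequestDuration("Head", time.Since(t), true, "background")
	reg.AddPayloadSize("Put", 4096)
}
```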
diff --git a/pkg/services/object/patch/range_provider.go b/pkg/services/object/patch/range_provider.go
new file mode 100644
index 0000000000..cb3f7c342b
--- /dev/null
+++ b/pkg/services/object/patch/range_provider.go
@@ -0,0 +1,75 @@
+package patchsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "io"
+
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ objectUtil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ patcherSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
+)
+
+func (p *pipeChunkWriter) WriteChunk(_ context.Context, chunk []byte) error {
+ _, err := p.wr.Write(chunk)
+ return err
+}
+
+type rangeProvider struct {
+ getSvc *getsvc.Service
+
+ addr oid.Address
+
+ commonPrm *objectUtil.CommonPrm
+
+ localNodeKey *ecdsa.PrivateKey
+}
+
+var _ patcherSDK.RangeProvider = (*rangeProvider)(nil)
+
+func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.Reader {
+ // A remote GetRange request to a container node uses an SDK client that fails range validation
+ // for zero-length ranges. However, from the patcher's point of view, such a request is still valid.
+ if rng.GetLength() == 0 {
+ return &nopReader{}
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ var rngPrm getsvc.RangePrm
+ rngPrm.SetSignerKey(r.localNodeKey)
+ rngPrm.SetCommonParameters(r.commonPrm)
+
+ rngPrm.WithAddress(r.addr)
+ rngPrm.SetChunkWriter(&pipeChunkWriter{
+ wr: pipeWriter,
+ })
+ rngPrm.SetRange(rng)
+
+ getRangeErr := make(chan error)
+
+ go func() {
+ defer pipeWriter.Close()
+
+ select {
+ case <-ctx.Done():
+ pipeWriter.CloseWithError(ctx.Err())
+ case err := <-getRangeErr:
+ pipeWriter.CloseWithError(err)
+ }
+ }()
+
+ go func() {
+ getRangeErr <- r.getSvc.GetRange(ctx, rngPrm)
+ }()
+
+ return pipeReader
+}
+
+type nopReader struct{}
+
+func (nopReader) Read(_ []byte) (int, error) {
+ return 0, io.EOF
+}
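rangeProvider adapts the push-style GetRange service into a pull-style io.Reader with io.Pipe: the producer writes in the background, and its terminal error, or the context error, reaches the reader through CloseWithError. A self-contained sketch of the same plumbing with a toy producer:

```go
package main

import (
	"context"
	"fmt"
	"io"
)

// produce stands in for getSvc.GetRange: it pushes chunks into a writer and
// returns an error status when done.
func produce(w io.Writer) error {
	for _, chunk := range []string{"ab", "cd", "ef"} {
		if _, err := io.WriteString(w, chunk); err != nil {
			return err
		}
	}
	return nil
}

// rangeReader mirrors rangeProvider.GetRange: the producer runs in the
// background, and its terminal error (or ctx cancellation) is delivered to
// the reader via CloseWithError; CloseWithError(nil) yields a clean EOF.
func rangeReader(ctx context.Context) io.Reader {
	pr, pw := io.Pipe()
	errCh := make(chan error)

	go func() {
		defer pw.Close()
		select {
		case <-ctx.Done():
			pw.CloseWithError(ctx.Err())
		case err := <-errCh:
			pw.CloseWithError(err)
		}
	}()

	go func() { errCh <- produce(pw) }()

	return pr
}

func main() {
	data, err := io.ReadAll(rangeReader(context.Background()))
	fmt.Printf("%q %v\n", data, err) // "abcdef" <nil>
}
```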
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
new file mode 100644
index 0000000000..5d298bfed0
--- /dev/null
+++ b/pkg/services/object/patch/service.go
@@ -0,0 +1,41 @@
+package patchsvc
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+)
+
+// Service implements Patch operation of Object service v2.
+type Service struct {
+ *objectwriter.Config
+
+ getSvc *getsvc.Service
+}
+
+// NewService constructs Service instance from provided options.
+//
+// Patch service can use the same objectwriter.Config initialized by the Put service.
+func NewService(cfg *objectwriter.Config,
+ getSvc *getsvc.Service,
+) *Service {
+ return &Service{
+ Config: cfg,
+
+ getSvc: getSvc,
+ }
+}
+
+// Patch calls internal service and returns v2 object streamer.
+func (s *Service) Patch() (object.PatchObjectStream, error) {
+ nodeKey, err := s.KeyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Streamer{
+ Config: s.Config,
+ getSvc: s.getSvc,
+ localNodeKey: nodeKey,
+ }, nil
+}
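Patch resolves the node key per call and returns a fresh Streamer whose patcher, as streamer.go below notes, is only built on the first Send, since the target address and the first patch arrive with the first request. A self-contained sketch of that lazy first-message initialization:

```go
package main

import "fmt"

// streamer initializes its inner state lazily on the first Send, mirroring
// the patch Streamer: nothing can be built before the first request arrives.
type streamer struct {
	initialized bool
	state       string
}

func (s *streamer) Send(req string) error {
	if !s.initialized {
		s.state = "patcher-for-" + req // stands in for init(ctx, req)
		s.initialized = true
		fmt.Println("initialized:", s.state)
	}
	fmt.Println("apply patch:", req)
	return nil
}

func main() {
	s := &streamer{}
	_ = s.Send("first")  // triggers init
	_ = s.Send("second") // init skipped
}
```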
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
new file mode 100644
index 0000000000..ff13b1d3ed
--- /dev/null
+++ b/pkg/services/object/patch/streamer.go
@@ -0,0 +1,243 @@
+package patchsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+ "io"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
+)
+
+// Streamer for the patch handler is a pipeline that merges the incoming stream
+// of patches with the original object payload chunks. The merged result is fed
+// to the Put stream target.
+type Streamer struct {
+ *objectwriter.Config
+
+	// patcher must be initialized on the first Streamer.Send call.
+ patcher patcher.PatchApplier
+
+ nonFirstSend bool
+
+ getSvc *getsvc.Service
+
+ localNodeKey *ecdsa.PrivateKey
+}
+
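+// pipeChunkWriter adapts an io.PipeWriter to the chunk writer interface
+// expected by the get service.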
+type pipeChunkWriter struct {
+ wr *io.PipeWriter
+}
+
+type headResponseWriter struct {
+ body *objectV2.HeadResponseBody
+}
+
+func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *objectSDK.Object) error {
+ w.body.SetHeaderPart(toFullObjectHeader(hdr))
+ return nil
+}
+
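+// toFullObjectHeader wraps the object's header and its signature into a v2
+// HeaderWithSignature response part.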
+func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
+ obj := hdr.ToV2()
+
+ hs := new(objectV2.HeaderWithSignature)
+ hs.SetHeader(obj.GetHeader())
+ hs.SetSignature(obj.GetSignature())
+
+ return hs
+}
+
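+// isLinkObject reports whether the header describes a link object of a split
+// chain, i.e. it has both children and a parent.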
+func isLinkObject(hdr *objectV2.HeaderWithSignature) bool {
+ split := hdr.GetHeader().GetSplit()
+ return len(split.GetChildren()) > 0 && split.GetParent() != nil
+}
+
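+// isComplexObjectPart reports whether the header belongs to an EC chunk or to
+// a part of a split object.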
+func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool {
+ return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil
+}
+
+func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
+ hdrWithSig, addr, err := s.readHeader(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular {
+ return errors.New("non-regular object can't be patched")
+ }
+ if isLinkObject(hdrWithSig) {
+ return errors.New("linking object can't be patched")
+ }
+ if isComplexObjectPart(hdrWithSig) {
+ return errors.New("complex object parts can't be patched")
+ }
+
+ commonPrm, err := util.CommonPrmFromV2(req)
+ if err != nil {
+ return err
+ }
+ commonPrm.WithLocalOnly(false)
+
+ rangeProvider := &rangeProvider{
+ getSvc: s.getSvc,
+
+ addr: addr,
+
+ commonPrm: commonPrm,
+
+ localNodeKey: s.localNodeKey,
+ }
+
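+	// Build a fresh header for the patched object: only the container ID,
+	// payload length and attributes are copied from the original header; the
+	// owner is derived from the request signature below.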
+ hdr := hdrWithSig.GetHeader()
+ oV2 := new(objectV2.Object)
+ hV2 := new(objectV2.Header)
+ oV2.SetHeader(hV2)
+ oV2.GetHeader().SetContainerID(hdr.GetContainerID())
+ oV2.GetHeader().SetPayloadLength(hdr.GetPayloadLength())
+ oV2.GetHeader().SetAttributes(hdr.GetAttributes())
+
+ ownerID, err := newOwnerID(req.GetVerificationHeader())
+ if err != nil {
+ return err
+ }
+ oV2.GetHeader().SetOwnerID(ownerID)
+
+ target, err := target.New(ctx, objectwriter.Params{
+ Config: s.Config,
+ Common: commonPrm,
+ Header: objectSDK.NewFromV2(oV2),
+ })
+ if err != nil {
+ return fmt.Errorf("target creation: %w", err)
+ }
+
+ patcherPrm := patcher.Params{
+ Header: objectSDK.NewFromV2(oV2),
+
+ RangeProvider: rangeProvider,
+
+ ObjectWriter: target,
+ }
+
+ s.patcher = patcher.New(patcherPrm)
+ return nil
+}
+
+func (s *Streamer) readHeader(ctx context.Context, req *objectV2.PatchRequest) (hdrWithSig *objectV2.HeaderWithSignature, addr oid.Address, err error) {
+ addrV2 := req.GetBody().GetAddress()
+ if addrV2 == nil {
+ err = errors.New("patch request has nil-address")
+ return
+ }
+
+ if err = addr.ReadFromV2(*addrV2); err != nil {
+ err = fmt.Errorf("read address error: %w", err)
+ return
+ }
+
+ commonPrm, err := util.CommonPrmFromV2(req)
+ if err != nil {
+ return
+ }
+ commonPrm.WithLocalOnly(false)
+
+ var p getsvc.HeadPrm
+ p.SetSignerKey(s.localNodeKey)
+ p.SetCommonParameters(commonPrm)
+
+ resp := new(objectV2.HeadResponse)
+ resp.SetBody(new(objectV2.HeadResponseBody))
+
+ p.WithAddress(addr)
+ p.SetHeaderWriter(&headResponseWriter{
+ body: resp.GetBody(),
+ })
+
+ err = s.getSvc.Head(ctx, p)
+ if err != nil {
+ err = fmt.Errorf("get header error: %w", err)
+ return
+ }
+
+ var ok bool
+ hdrPart := resp.GetBody().GetHeaderPart()
+ if hdrWithSig, ok = hdrPart.(*objectV2.HeaderWithSignature); !ok {
+ err = fmt.Errorf("unexpected header type: %T", hdrPart)
+ }
+ return
+}
+
+func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "patch.streamer.Send")
+ defer span.End()
+
+ defer func() {
+ s.nonFirstSend = true
+ }()
+
+ if !s.nonFirstSend {
+ if err := s.init(ctx, req); err != nil {
+ return fmt.Errorf("streamer init error: %w", err)
+ }
+ }
+
+ patch := new(objectSDK.Patch)
+ patch.FromV2(req.GetBody())
+
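+	// A header patch may arrive only in the first stream message, so it is
+	// applied once, before any payload patches.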
+ if !s.nonFirstSend {
+ err := s.patcher.ApplyHeaderPatch(ctx,
+ patcher.ApplyHeaderPatchPrm{
+ NewSplitHeader: patch.NewSplitHeader,
+ NewAttributes: patch.NewAttributes,
+ ReplaceAttributes: patch.ReplaceAttributes,
+ })
+ if err != nil {
+ return fmt.Errorf("patch attributes: %w", err)
+ }
+ }
+
+ if patch.PayloadPatch != nil {
+ err := s.patcher.ApplyPayloadPatch(ctx, patch.PayloadPatch)
+ if err != nil {
+ return fmt.Errorf("patch payload: %w", err)
+ }
+ } else if s.nonFirstSend {
+ return errors.New("invalid non-first patch: empty payload")
+ }
+
+ return nil
+}
+
+func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ if s.patcher == nil {
+ return nil, errors.New("uninitialized patch streamer")
+ }
+ patcherResp, err := s.patcher.Close(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ oidV2 := new(refsV2.ObjectID)
+
+ if patcherResp.AccessIdentifiers.ParentID != nil {
+ patcherResp.AccessIdentifiers.ParentID.WriteToV2(oidV2)
+ } else {
+ patcherResp.AccessIdentifiers.SelfID.WriteToV2(oidV2)
+ }
+
+ return &objectV2.PatchResponse{
+ Body: &objectV2.PatchResponseBody{
+ ObjectID: oidV2,
+ },
+ }, nil
+}
diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go
new file mode 100644
index 0000000000..b9416789ce
--- /dev/null
+++ b/pkg/services/object/patch/util.go
@@ -0,0 +1,34 @@
+package patchsvc
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
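+// newOwnerID derives the owner ID from the public key that signed the body of
+// the original (innermost) request.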
+func newOwnerID(vh *session.RequestVerificationHeader) (*refs.OwnerID, error) {
+ for vh.GetOrigin() != nil {
+ vh = vh.GetOrigin()
+ }
+ sig := vh.GetBodySignature()
+ if sig == nil {
+ return nil, errors.New("empty body signature")
+ }
+ key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var userID user.ID
+ user.IDFromKey(&userID, (ecdsa.PublicKey)(*key))
+ ownID := new(refs.OwnerID)
+ userID.WriteToV2(ownID)
+
+ return ownID, nil
+}
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
deleted file mode 100644
index 8d47e63bed..0000000000
--- a/pkg/services/object/put/distributed.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package putsvc
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.uber.org/zap"
-)
-
-type preparedObjectTarget interface {
- WriteObject(*objectSDK.Object, object.ContentMeta) error
- Close() (*transformer.AccessIdentifiers, error)
-}
-
-type distributedTarget struct {
- traversal traversal
-
- remotePool, localPool util.WorkerPool
-
- obj *objectSDK.Object
- objMeta object.ContentMeta
-
- payload []byte
-
- nodeTargetInitializer func(nodeDesc) preparedObjectTarget
-
- isLocalKey func([]byte) bool
-
- relay func(nodeDesc) error
-
- fmt *object.FormatValidator
-
- log *logger.Logger
-}
-
-// parameters and state of container traversal.
-type traversal struct {
- opts []placement.Option
-
- // need of additional broadcast after the object is saved
- extraBroadcastEnabled bool
-
- // mtx protects mExclude map.
- mtx sync.RWMutex
-
- // container nodes which was processed during the primary object placement
- mExclude map[string]struct{}
-}
-
-// updates traversal parameters after the primary placement finish and
-// returns true if additional container broadcast is needed.
-func (x *traversal) submitPrimaryPlacementFinish() bool {
- if x.extraBroadcastEnabled {
- // do not track success during container broadcast (best-effort)
- x.opts = append(x.opts, placement.WithoutSuccessTracking())
-
- // avoid 2nd broadcast
- x.extraBroadcastEnabled = false
-
- return true
- }
-
- return false
-}
-
-// marks the container node as processed during the primary object placement.
-func (x *traversal) submitProcessed(n placement.Node) {
- if x.extraBroadcastEnabled {
- key := string(n.PublicKey())
-
- x.mtx.Lock()
- if x.mExclude == nil {
- x.mExclude = make(map[string]struct{}, 1)
- }
-
- x.mExclude[key] = struct{}{}
- x.mtx.Unlock()
- }
-}
-
-// checks if specified node was processed during the primary object placement.
-func (x *traversal) processed(n placement.Node) bool {
- x.mtx.RLock()
- _, ok := x.mExclude[string(n.PublicKey())]
- x.mtx.RUnlock()
- return ok
-}
-
-type nodeDesc struct {
- local bool
-
- info placement.Node
-}
-
-// errIncompletePut is returned if processing on a container fails.
-type errIncompletePut struct {
- singleErr error // error from the last responding node
-}
-
-func (x errIncompletePut) Error() string {
- const commonMsg = "incomplete object PUT by placement"
-
- if x.singleErr != nil {
- return fmt.Sprintf("%s: %v", commonMsg, x.singleErr)
- }
-
- return commonMsg
-}
-
-func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
- t.obj = obj
-
- return nil
-}
-
-func (t *distributedTarget) Write(p []byte) (n int, err error) {
- t.payload = append(t.payload, p...)
-
- return len(p), nil
-}
-
-func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
- defer func() {
- putPayload(t.payload)
- t.payload = nil
- }()
-
- t.obj.SetPayload(t.payload)
-
- var err error
-
- if t.objMeta, err = t.fmt.ValidateContent(t.obj); err != nil {
- return nil, fmt.Errorf("(%T) could not validate payload content: %w", t, err)
- }
-
- if len(t.obj.Children()) > 0 {
- // enabling extra broadcast for linking objects
- t.traversal.extraBroadcastEnabled = true
- }
-
- return t.iteratePlacement(t.sendObject)
-}
-
-func (t *distributedTarget) sendObject(node nodeDesc) error {
- if !node.local && t.relay != nil {
- return t.relay(node)
- }
-
- target := t.nodeTargetInitializer(node)
-
- if err := target.WriteObject(t.obj, t.objMeta); err != nil {
- return fmt.Errorf("could not write header: %w", err)
- } else if _, err := target.Close(); err != nil {
- return fmt.Errorf("could not close object stream: %w", err)
- }
- return nil
-}
-
-func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transformer.AccessIdentifiers, error) {
- id, _ := t.obj.ID()
-
- traverser, err := placement.NewTraverser(
- append(t.traversal.opts, placement.ForObject(id))...,
- )
- if err != nil {
- return nil, fmt.Errorf("(%T) could not create object placement traverser: %w", t, err)
- }
-
- var resErr atomic.Value
-
-loop:
- for {
- addrs := traverser.Next()
- if len(addrs) == 0 {
- break
- }
-
- wg := new(sync.WaitGroup)
-
- for i := range addrs {
- if t.traversal.processed(addrs[i]) {
- // it can happen only during additional container broadcast
- continue
- }
-
- wg.Add(1)
-
- addr := addrs[i]
-
- isLocal := t.isLocalKey(addr.PublicKey())
-
- var workerPool util.WorkerPool
-
- if isLocal {
- workerPool = t.localPool
- } else {
- workerPool = t.remotePool
- }
-
- if err := workerPool.Submit(func() {
- defer wg.Done()
-
- err := f(nodeDesc{local: isLocal, info: addr})
-
- // mark the container node as processed in order to exclude it
- // in subsequent container broadcast. Note that we don't
- // process this node during broadcast if primary placement
- // on it failed.
- t.traversal.submitProcessed(addr)
-
- if err != nil {
- resErr.Store(err)
- svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
- return
- }
-
- traverser.SubmitSuccess()
- }); err != nil {
- wg.Done()
-
- svcutil.LogWorkerPoolError(t.log, "PUT", err)
-
- break loop
- }
- }
-
- wg.Wait()
- }
-
- if !traverser.Success() {
- var err errIncompletePut
-
- err.singleErr, _ = resErr.Load().(error)
-
- return nil, err
- }
-
- // perform additional container broadcast if needed
- if t.traversal.submitPrimaryPlacementFinish() {
- _, err = t.iteratePlacement(f)
- if err != nil {
- t.log.Error("additional container broadcast failure",
- zap.Error(err),
- )
-
- // we don't fail primary operation because of broadcast failure
- }
- }
-
- id, _ = t.obj.ID()
-
- return new(transformer.AccessIdentifiers).
- WithSelfID(id), nil
-}
diff --git a/pkg/services/object/put/local.go b/pkg/services/object/put/local.go
deleted file mode 100644
index f344f77e96..0000000000
--- a/pkg/services/object/put/local.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package putsvc
-
-import (
- "fmt"
-
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// ObjectStorage is an object storage interface.
-type ObjectStorage interface {
- // Put must save passed object
- // and return any appeared error.
- Put(*object.Object) error
- // Delete must delete passed objects
- // and return any appeared error.
- Delete(tombstone oid.Address, toDelete []oid.ID) error
- // Lock must lock passed objects
- // and return any appeared error.
- Lock(locker oid.Address, toLock []oid.ID) error
- // IsLocked must clarify object's lock status.
- IsLocked(oid.Address) (bool, error)
-}
-
-type localTarget struct {
- storage ObjectStorage
-
- obj *object.Object
- meta objectCore.ContentMeta
-}
-
-func (t *localTarget) WriteObject(obj *object.Object, meta objectCore.ContentMeta) error {
- t.obj = obj
- t.meta = meta
-
- return nil
-}
-
-func (t *localTarget) Close() (*transformer.AccessIdentifiers, error) {
- switch t.meta.Type() {
- case object.TypeTombstone:
- err := t.storage.Delete(objectCore.AddressOf(t.obj), t.meta.Objects())
- if err != nil {
- return nil, fmt.Errorf("could not delete objects from tombstone locally: %w", err)
- }
- case object.TypeLock:
- err := t.storage.Lock(objectCore.AddressOf(t.obj), t.meta.Objects())
- if err != nil {
- return nil, fmt.Errorf("could not lock object from lock objects locally: %w", err)
- }
- default:
- // objects that do not change meta storage
- }
-
- if err := t.storage.Put(t.obj); err != nil {
- return nil, fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
- }
-
- id, _ := t.obj.ID()
-
- return new(transformer.AccessIdentifiers).
- WithSelfID(id), nil
-}
diff --git a/pkg/services/object/put/pool.go b/pkg/services/object/put/pool.go
deleted file mode 100644
index 705273227d..0000000000
--- a/pkg/services/object/put/pool.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package putsvc
-
-import (
- "sync"
-)
-
-const defaultAllocSize = 1024
-
-var putBytesPool = &sync.Pool{
- New: func() any { return make([]byte, 0, defaultAllocSize) },
-}
-
-func getPayload() []byte {
- return putBytesPool.Get().([]byte)
-}
-
-func putPayload(p []byte) {
- //nolint:staticcheck
- putBytesPool.Put(p[:0])
-}
diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go
index aea5926f40..52a7c102c3 100644
--- a/pkg/services/object/put/prm.go
+++ b/pkg/services/object/put/prm.go
@@ -1,23 +1,25 @@
package putsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
type PutInitPrm struct {
common *util.CommonPrm
- hdr *object.Object
+ hdr *objectSDK.Object
cnr containerSDK.Container
traverseOpts []placement.Option
- relay func(client.NodeInfo, client.MultiAddressClient) error
+ relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
}
type PutChunkPrm struct {
@@ -32,7 +34,7 @@ func (p *PutInitPrm) WithCommonPrm(v *util.CommonPrm) *PutInitPrm {
return p
}
-func (p *PutInitPrm) WithObject(v *object.Object) *PutInitPrm {
+func (p *PutInitPrm) WithObject(v *objectSDK.Object) *PutInitPrm {
if p != nil {
p.hdr = v
}
@@ -40,7 +42,15 @@ func (p *PutInitPrm) WithObject(v *object.Object) *PutInitPrm {
return p
}
-func (p *PutInitPrm) WithRelay(f func(client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm {
+func (p *PutInitPrm) WithCopyNumbers(v []uint32) *PutInitPrm {
+ if p != nil && len(v) > 0 {
+ p.traverseOpts = append(p.traverseOpts, placement.WithCopyNumbers(v))
+ }
+
+ return p
+}
+
+func (p *PutInitPrm) WithRelay(f func(context.Context, client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm {
if p != nil {
p.relay = f
}
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index b74c97d490..7aeb5857df 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -1,149 +1,63 @@
package putsvc
import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
-type MaxSizeSource interface {
- // MaxObjectSize returns maximum payload size
- // of physically stored object in system.
- //
- // Must return 0 if value can not be obtained.
- MaxObjectSize() uint64
-}
-
type Service struct {
- *cfg
+ *objectwriter.Config
}
-type Option func(*cfg)
-
-type ClientConstructor interface {
- Get(client.NodeInfo) (client.MultiAddressClient, error)
-}
-
-type cfg struct {
- keyStorage *objutil.KeyStorage
-
- maxSizeSrc MaxSizeSource
-
- localStore ObjectStorage
-
- cnrSrc container.Source
-
- netMapSrc netmap.Source
-
- remotePool, localPool util.WorkerPool
-
- netmapKeys netmap.AnnouncedKeys
-
- fmtValidator *object.FormatValidator
-
- fmtValidatorOpts []object.FormatValidatorOption
-
- networkState netmap.State
-
- clientConstructor ClientConstructor
-
- log *logger.Logger
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- remotePool: util.NewPseudoWorkerPool(),
- localPool: util.NewPseudoWorkerPool(),
- log: &logger.Logger{Logger: zap.L()},
+func NewService(ks *objutil.KeyStorage,
+ cc objectwriter.ClientConstructor,
+ ms objectwriter.MaxSizeSource,
+ os objectwriter.ObjectStorage,
+ cs container.Source,
+ ns netmap.Source,
+ nk netmap.AnnouncedKeys,
+ nst netmap.State,
+ ir objectwriter.InnerRing,
+ opts ...objectwriter.Option,
+) *Service {
+ c := &objectwriter.Config{
+ Logger: logger.NewLoggerWrapper(zap.L()),
+ KeyStorage: ks,
+ ClientConstructor: cc,
+ MaxSizeSrc: ms,
+ LocalStore: os,
+ ContainerSource: cs,
+ NetmapSource: ns,
+ NetmapKeys: nk,
+ NetworkState: nst,
}
-}
-
-func NewService(opts ...Option) *Service {
- c := defaultCfg()
for i := range opts {
opts[i](c)
}
- c.fmtValidator = object.NewFormatValidator(c.fmtValidatorOpts...)
+ c.FormatValidator = object.NewFormatValidator(
+ object.WithLockSource(os),
+ object.WithNetState(nst),
+ object.WithInnerRing(ir),
+ object.WithNetmapSource(ns),
+ object.WithContainersSource(cs),
+ object.WithVerifySessionTokenIssuer(c.VerifySessionTokenIssuer),
+ object.WithLogger(c.Logger),
+ )
return &Service{
- cfg: c,
+ Config: c,
}
}
-func (p *Service) Put(ctx context.Context) (*Streamer, error) {
+func (s *Service) Put() (*Streamer, error) {
return &Streamer{
- cfg: p.cfg,
- ctx: ctx,
+ Config: s.Config,
}, nil
}
-
-func WithKeyStorage(v *objutil.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStorage = v
- }
-}
-
-func WithMaxSizeSource(v MaxSizeSource) Option {
- return func(c *cfg) {
- c.maxSizeSrc = v
- }
-}
-
-func WithObjectStorage(v ObjectStorage) Option {
- return func(c *cfg) {
- c.localStore = v
- c.fmtValidatorOpts = append(c.fmtValidatorOpts, object.WithLockSource(v))
- }
-}
-
-func WithContainerSource(v container.Source) Option {
- return func(c *cfg) {
- c.cnrSrc = v
- }
-}
-
-func WithNetworkMapSource(v netmap.Source) Option {
- return func(c *cfg) {
- c.netMapSrc = v
- }
-}
-
-func WithWorkerPools(remote, local util.WorkerPool) Option {
- return func(c *cfg) {
- c.remotePool, c.localPool = remote, local
- }
-}
-
-func WithNetmapKeys(v netmap.AnnouncedKeys) Option {
- return func(c *cfg) {
- c.netmapKeys = v
- }
-}
-
-func WithNetworkState(v netmap.State) Option {
- return func(c *cfg) {
- c.networkState = v
- c.fmtValidatorOpts = append(c.fmtValidatorOpts, object.WithNetState(v))
- }
-}
-
-func WithClientConstructor(v ClientConstructor) Option {
- return func(c *cfg) {
- c.clientConstructor = v
- }
-}
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
new file mode 100644
index 0000000000..90f4732542
--- /dev/null
+++ b/pkg/services/object/put/single.go
@@ -0,0 +1,363 @@
+package putsvc
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "hash"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+var errInvalidPayloadChecksum = errors.New("incorrect payload checksum")
+
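+// putSingleRequestSigner re-signs the request with a decremented TTL. The
+// sync.Once guarantees the meta header is rewritten and signed at most once,
+// even if the request is relayed to several nodes.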
+type putSingleRequestSigner struct {
+ req *objectAPI.PutSingleRequest
+ keyStorage *svcutil.KeyStorage
+ signer *sync.Once
+}
+
+func (s *putSingleRequestSigner) GetRequestWithSignedHeader() (*objectAPI.PutSingleRequest, error) {
+ var resErr error
+ s.signer.Do(func() {
+ metaHdr := new(sessionV2.RequestMetaHeader)
+ meta := s.req.GetMetaHeader()
+
+ metaHdr.SetTTL(meta.GetTTL() - 1)
+ metaHdr.SetOrigin(meta)
+ s.req.SetMetaHeader(metaHdr)
+
+ privateKey, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ resErr = err
+ return
+ }
+ resErr = signature.SignServiceMessage(privateKey, s.req)
+ })
+ return s.req, resErr
+}
+
+func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest) (*objectAPI.PutSingleResponse, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putsvc.PutSingle")
+ defer span.End()
+
+ obj := objectSDK.NewFromV2(req.GetBody().GetObject())
+
+ meta, err := s.validatePutSingle(ctx, obj)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := s.saveToNodes(ctx, obj, req, meta); err != nil {
+ return nil, err
+ }
+
+ resp := &objectAPI.PutSingleResponse{}
+ resp.SetBody(&objectAPI.PutSingleResponseBody{})
+ return resp, nil
+}
+
+func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
+	if err := s.validatePutSingleSize(ctx, obj); err != nil {
+ return object.ContentMeta{}, err
+ }
+
+ if err := s.validatePutSingleChecksum(obj); err != nil {
+ return object.ContentMeta{}, err
+ }
+
+ return s.validatePutSingleObject(ctx, obj)
+}
+
+func (s *Service) validatePutSingleSize(ctx context.Context, obj *objectSDK.Object) error {
+ if uint64(len(obj.Payload())) != obj.PayloadSize() {
+ return target.ErrWrongPayloadSize
+ }
+
+ maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
+ if obj.PayloadSize() > maxAllowedSize {
+ return target.ErrExceedingMaxSize
+ }
+
+ return nil
+}
+
+func (s *Service) validatePutSingleChecksum(obj *objectSDK.Object) error {
+ cs, csSet := obj.PayloadChecksum()
+ if !csSet {
+ return errors.New("missing payload checksum")
+ }
+
+ var hash hash.Hash
+
+ switch typ := cs.Type(); typ {
+ default:
+ return fmt.Errorf("unsupported payload checksum type %v", typ)
+ case checksum.SHA256:
+ hash = sha256.New()
+ case checksum.TZ:
+ hash = tz.New()
+ }
+
+ if _, err := hash.Write(obj.Payload()); err != nil {
+ return fmt.Errorf("could not compute payload hash: %w", err)
+ }
+
+ if !bytes.Equal(hash.Sum(nil), cs.Value()) {
+ return errInvalidPayloadChecksum
+ }
+
+ return nil
+}
+
+func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
+ if err := s.FormatValidator.Validate(ctx, obj, false); err != nil {
+ return object.ContentMeta{}, fmt.Errorf("coud not validate object format: %w", err)
+ }
+
+ meta, err := s.FormatValidator.ValidateContent(obj)
+ if err != nil {
+ return object.ContentMeta{}, fmt.Errorf("could not validate payload content: %w", err)
+ }
+
+ return meta, nil
+}
+
+func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
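+	// TTL <= 1 means the request must not be relayed any further, so the
+	// object is stored on the local node only.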
+ localOnly := req.GetMetaHeader().GetTTL() <= 1
+ placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly)
+ if err != nil {
+ return err
+ }
+
+ if placement.isEC {
+ return s.saveToECReplicas(ctx, placement, obj, req, meta)
+ }
+
+ return s.saveToREPReplicas(ctx, placement, obj, localOnly, req, meta)
+}
+
+func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
+ iter := s.NewNodeIterator(placement.placementOptions)
+ iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
+ iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
+
+ signer := &putSingleRequestSigner{
+ req: req,
+ keyStorage: s.KeyStorage,
+ signer: &sync.Once{},
+ }
+
+ return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
+ return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container)
+ })
+}
+
+func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
+ commonPrm, err := svcutil.CommonPrmFromV2(req)
+ if err != nil {
+ return err
+ }
+ key, err := s.KeyStorage.GetKey(nil)
+ if err != nil {
+ return err
+ }
+ signer := &putSingleRequestSigner{
+ req: req,
+ keyStorage: s.KeyStorage,
+ signer: &sync.Once{},
+ }
+
+ w := objectwriter.ECWriter{
+ Config: s.Config,
+ PlacementOpts: placement.placementOptions,
+ ObjectMeta: meta,
+ ObjectMetaValid: true,
+ CommonPrm: commonPrm,
+ Container: placement.container,
+ Key: key,
+ Relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error {
+ return s.redirectPutSingleRequest(ctx, signer, obj, ni, mac)
+ },
+ }
+ return w.WriteObject(ctx, obj)
+}
+
+type putSinglePlacement struct {
+ placementOptions []placement.Option
+ isEC bool
+ container containerSDK.Container
+ resetSuccessAfterOnBroadcast bool
+}
+
+func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
+ var result putSinglePlacement
+
+ cnrID, ok := obj.ContainerID()
+ if !ok {
+ return result, errors.New("missing container ID")
+ }
+ cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
+ if err != nil {
+ return result, fmt.Errorf("could not get container by ID: %w", err)
+ }
+ result.container = cnrInfo.Value
+ result.isEC = container.IsECContainer(cnrInfo.Value) && object.IsECSupported(obj)
+ if len(copiesNumber) > 0 && !result.isEC {
+ result.placementOptions = append(result.placementOptions, placement.WithCopyNumbers(copiesNumber))
+ }
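+	// A regular (non-EC) object saved into an EC container must reach at least
+	// parity count + 1 nodes before the operation is considered successful.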
+ if container.IsECContainer(cnrInfo.Value) && !object.IsECSupported(obj) && !localOnly {
+ result.placementOptions = append(result.placementOptions, placement.SuccessAfter(uint32(policy.ECParityCount(cnrInfo.Value.PlacementPolicy())+1)))
+ result.resetSuccessAfterOnBroadcast = true
+ }
+ result.placementOptions = append(result.placementOptions, placement.ForContainer(cnrInfo.Value))
+
+ objID, ok := obj.ID()
+ if !ok {
+ return result, errors.New("missing object ID")
+ }
+ if obj.ECHeader() != nil {
+ objID = obj.ECHeader().Parent()
+ }
+ result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
+
+ latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
+ if err != nil {
+ return result, fmt.Errorf("could not get latest network map: %w", err)
+ }
+ builder := placement.NewNetworkMapBuilder(latestNetmap)
+ if localOnly {
+ result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
+ builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
+ }
+ result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
+ return result, nil
+}
+
+func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
+ signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container,
+) error {
+ if nodeDesc.Local {
+ return s.saveLocal(ctx, obj, meta, container)
+ }
+
+ var info client.NodeInfo
+
+ client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
+
+ c, err := s.ClientConstructor.Get(info)
+ if err != nil {
+ return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
+ }
+
+ return s.redirectPutSingleRequest(ctx, signer, obj, info, c)
+}
+
+func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
+ localTarget := &objectwriter.LocalTarget{
+ Storage: s.LocalStore,
+ Container: container,
+ }
+ return localTarget.WriteObject(ctx, obj, meta)
+}
+
+func (s *Service) redirectPutSingleRequest(ctx context.Context,
+ signer *putSingleRequestSigner,
+ obj *objectSDK.Object,
+ info client.NodeInfo,
+ c client.MultiAddressClient,
+) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putService.redirectPutSingleRequest")
+ defer span.End()
+
+ var req *objectAPI.PutSingleRequest
+ var firstErr error
+ req, firstErr = signer.GetRequestWithSignedHeader()
+ if firstErr != nil {
+ return firstErr
+ }
+
+ info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putService.redirectPutSingleRequest.IterateAddresses",
+ trace.WithAttributes(
+ attribute.String("address", addr.String())))
+ defer span.End()
+
+ var err error
+
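+		// Stop iterating on the first address that succeeds; a success clears
+		// any previously recorded error, otherwise the first error seen is kept.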
+ defer func() {
+ if err != nil {
+ objID, _ := obj.ID()
+ cnrID, _ := obj.ContainerID()
+ s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
+ zap.Error(err),
+ zap.Stringer("address", addr),
+ zap.Stringer("object_id", objID),
+ zap.Stringer("container_id", cnrID),
+ )
+ }
+
+ stop = err == nil
+ if stop || firstErr == nil {
+ firstErr = err
+ }
+ }()
+
+ var resp *objectAPI.PutSingleResponse
+
+ err = c.RawForAddress(ctx, addr, func(cli *rawclient.Client) error {
+ var e error
+ resp, e = rpc.PutSingleObject(cli, req, rawclient.WithContext(ctx))
+ return e
+ })
+ if err != nil {
+ err = fmt.Errorf("failed to execute request: %w", err)
+ return
+ }
+
+ if err = internal.VerifyResponseKeyV2(info.PublicKey(), resp); err != nil {
+ return
+ }
+
+ err = signature.VerifyServiceMessage(resp)
+ if err != nil {
+ err = fmt.Errorf("response verification failed: %w", err)
+ return
+ }
+
+ st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus())
+ err = apistatus.ErrFromStatus(st)
+
+ return
+ })
+
+ return firstErr
+}
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index ae1a2f451b..19768b7fae 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -2,275 +2,74 @@ package putsvc
import (
"context"
- "crypto/ecdsa"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
)
type Streamer struct {
- *cfg
+ *objectwriter.Config
- ctx context.Context
-
- sessionKey *ecdsa.PrivateKey
-
- target transformer.ObjectTarget
-
- relay func(client.NodeInfo, client.MultiAddressClient) error
-
- maxPayloadSz uint64 // network config
+ target transformer.ChunkedObjectWriter
}
var errNotInit = errors.New("stream not initialized")
var errInitRecall = errors.New("init recall")
-func (p *Streamer) Init(prm *PutInitPrm) error {
+func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
+ if p.target != nil {
+ return errInitRecall
+ }
+
// initialize destination target
- if err := p.initTarget(prm); err != nil {
+ prmTarget := objectwriter.Params{
+ Config: p.Config,
+ Common: prm.common,
+ Header: prm.hdr,
+ Container: prm.cnr,
+ TraverseOpts: prm.traverseOpts,
+ Relay: prm.relay,
+ }
+
+ var err error
+ p.target, err = target.New(ctx, prmTarget)
+ if err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
- if err := p.target.WriteHeader(prm.hdr); err != nil {
+ if err := p.target.WriteHeader(ctx, prm.hdr); err != nil {
return fmt.Errorf("(%T) could not write header to target: %w", p, err)
}
return nil
}
-// MaxObjectSize returns maximum payload size for the streaming session.
-//
-// Must be called after the successful Init.
-func (p *Streamer) MaxObjectSize() uint64 {
- return p.maxPayloadSz
-}
-
-func (p *Streamer) initTarget(prm *PutInitPrm) error {
- // prevent re-calling
- if p.target != nil {
- return errInitRecall
- }
-
- // prepare needed put parameters
- if err := p.preparePrm(prm); err != nil {
- return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err)
- }
-
- p.maxPayloadSz = p.maxSizeSrc.MaxObjectSize()
- if p.maxPayloadSz == 0 {
- return fmt.Errorf("(%T) could not obtain max object size parameter", p)
- }
-
- if prm.hdr.Signature() != nil {
- p.relay = prm.relay
-
- // prepare untrusted-Put object target
- p.target = &validatingTarget{
- nextTarget: p.newCommonTarget(prm),
- fmt: p.fmtValidator,
-
- maxPayloadSz: p.maxPayloadSz,
- }
-
- return nil
- }
-
- sToken := prm.common.SessionToken()
-
- // prepare trusted-Put object target
-
- // get private token from local storage
- var sessionInfo *util.SessionInfo
-
- if sToken != nil {
- sessionInfo = &util.SessionInfo{
- ID: sToken.ID(),
- Owner: sToken.Issuer(),
- }
- }
-
- sessionKey, err := p.keyStorage.GetKey(sessionInfo)
- if err != nil {
- return fmt.Errorf("(%T) could not receive session key: %w", p, err)
- }
-
- // In case session token is missing, the line above returns the default key.
- // If it isn't owner key, replication attempts will fail, thus this check.
- if sToken == nil {
- ownerObj := prm.hdr.OwnerID()
- if ownerObj == nil {
- return errors.New("missing object owner")
- }
-
- var ownerSession user.ID
- user.IDFromKey(&ownerSession, sessionKey.PublicKey)
-
- if !ownerObj.Equals(ownerSession) {
- return fmt.Errorf("(%T) session token is missing but object owner id is different from the default key", p)
- }
- }
-
- p.sessionKey = sessionKey
- p.target = &validatingTarget{
- fmt: p.fmtValidator,
- unpreparedObject: true,
- nextTarget: transformer.NewPayloadSizeLimiter(
- p.maxPayloadSz,
- containerSDK.IsHomomorphicHashingDisabled(prm.cnr),
- func() transformer.ObjectTarget {
- return transformer.NewFormatTarget(&transformer.FormatterParams{
- Key: sessionKey,
- NextTarget: p.newCommonTarget(prm),
- SessionToken: sToken,
- NetworkState: p.networkState,
- })
- },
- ),
- }
-
- return nil
-}
-
-func (p *Streamer) preparePrm(prm *PutInitPrm) error {
- var err error
-
- // get latest network map
- nm, err := netmap.GetLatestNetworkMap(p.netMapSrc)
- if err != nil {
- return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
- }
-
- idCnr, ok := prm.hdr.ContainerID()
- if !ok {
- return errors.New("missing container ID")
- }
-
- // get container to store the object
- cnrInfo, err := p.cnrSrc.Get(idCnr)
- if err != nil {
- return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
- }
-
- prm.cnr = cnrInfo.Value
-
- // add common options
- prm.traverseOpts = append(prm.traverseOpts,
- // set processing container
- placement.ForContainer(prm.cnr),
- )
-
- if id, ok := prm.hdr.ID(); ok {
- prm.traverseOpts = append(prm.traverseOpts,
- // set identifier of the processing object
- placement.ForObject(id),
- )
- }
-
- // create placement builder from network map
- builder := placement.NewNetworkMapBuilder(nm)
-
- if prm.common.LocalOnly() {
- // restrict success count to 1 stored copy (to local storage)
- prm.traverseOpts = append(prm.traverseOpts, placement.SuccessAfter(1))
-
- // use local-only placement builder
- builder = util.NewLocalPlacement(builder, p.netmapKeys)
- }
-
- // set placement builder
- prm.traverseOpts = append(prm.traverseOpts, placement.UseBuilder(builder))
-
- return nil
-}
-
-func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget {
- var relay func(nodeDesc) error
- if p.relay != nil {
- relay = func(node nodeDesc) error {
- var info client.NodeInfo
-
- client.NodeInfoFromNetmapElement(&info, node.info)
-
- c, err := p.clientConstructor.Get(info)
- if err != nil {
- return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
- }
-
- return p.relay(info, c)
- }
- }
-
- // enable additional container broadcast on non-local operation
- // if object has TOMBSTONE or LOCK type.
- typ := prm.hdr.Type()
- withBroadcast := !prm.common.LocalOnly() && (typ == object.TypeTombstone || typ == object.TypeLock)
-
- return &distributedTarget{
- traversal: traversal{
- opts: prm.traverseOpts,
-
- extraBroadcastEnabled: withBroadcast,
- },
- payload: getPayload(),
- remotePool: p.remotePool,
- localPool: p.localPool,
- nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
- if node.local {
- return &localTarget{
- storage: p.localStore,
- }
- }
-
- rt := &remoteTarget{
- ctx: p.ctx,
- privateKey: p.sessionKey,
- commonPrm: prm.common,
- clientConstructor: p.clientConstructor,
- }
-
- client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.info)
-
- return rt
- },
- relay: relay,
- fmt: p.fmtValidator,
- log: p.log,
-
- isLocalKey: p.netmapKeys.IsLocalKey,
- }
-}
-
-func (p *Streamer) SendChunk(prm *PutChunkPrm) error {
+func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error {
if p.target == nil {
return errNotInit
}
- if _, err := p.target.Write(prm.chunk); err != nil {
+ if _, err := p.target.Write(ctx, prm.chunk); err != nil {
return fmt.Errorf("(%T) could not write payload chunk to target: %w", p, err)
}
return nil
}
-func (p *Streamer) Close() (*PutResponse, error) {
+func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
if p.target == nil {
return nil, errNotInit
}
- ids, err := p.target.Close()
+ ids, err := p.target.Close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not close object target: %w", p, err)
}
- id := ids.ParentID()
+ id := ids.ParentID
if id != nil {
return &PutResponse{
id: *id,
@@ -278,6 +77,6 @@ func (p *Streamer) Close() (*PutResponse, error) {
}
return &PutResponse{
- id: ids.SelfID(),
+ id: ids.SelfID,
}, nil
}
diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go
index 7d0dfc6135..78d4c711d7 100644
--- a/pkg/services/object/put/v2/service.go
+++ b/pkg/services/object/put/v2/service.go
@@ -7,37 +7,26 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Put operation of Object service v2.
type Service struct {
- *cfg
-}
-
-// Option represents Service constructor option.
-type Option func(*cfg)
-
-type cfg struct {
svc *putsvc.Service
keyStorage *util.KeyStorage
}
// NewService constructs Service instance from provided options.
-func NewService(opts ...Option) *Service {
- c := new(cfg)
-
- for i := range opts {
- opts[i](c)
- }
-
+func NewService(svc *putsvc.Service, ks *util.KeyStorage) *Service {
return &Service{
- cfg: c,
+ svc: svc,
+ keyStorage: ks,
}
}
// Put calls internal service and returns v2 object streamer.
-func (s *Service) Put(ctx context.Context) (object.PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *Service) Put() (object.PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("(%T) could not open object put stream: %w", s, err)
}
@@ -48,14 +37,6 @@ func (s *Service) Put(ctx context.Context) (object.PutObjectStream, error) {
}, nil
}
-func WithInternalService(v *putsvc.Service) Option {
- return func(c *cfg) {
- c.svc = v
- }
-}
-
-func WithKeyStorage(ks *util.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStorage = ks
- }
+func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest) (*objectAPI.PutSingleResponse, error) {
+ return s.svc.PutSingle(ctx, req)
}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 85827cd4ce..f0c6481878 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -1,19 +1,24 @@
package putsvc
import (
+ "context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type streamer struct {
@@ -32,7 +37,10 @@ type sizes struct {
writtenPayload uint64 // sum size of already cached chunks
}
-func (s *streamer) Send(req *object.PutRequest) (err error) {
+func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.Send")
+ defer span.End()
+
switch v := req.GetBody().GetObjectPart().(type) {
case *object.PutObjectPartInit:
var initPrm *putsvc.PutInitPrm
@@ -42,21 +50,21 @@ func (s *streamer) Send(req *object.PutRequest) (err error) {
return err
}
- if err = s.stream.Init(initPrm); err != nil {
+ if err = s.stream.Init(ctx, initPrm); err != nil {
err = fmt.Errorf("(%T) could not init object put stream: %w", s, err)
}
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxObjectSize()
+ maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx)
s.sizes = &sizes{
- payloadSz: uint64(v.GetHeader().GetPayloadLength()),
+ payloadSz: v.GetHeader().GetPayloadLength(),
}
// check payload size limit overflow
if s.payloadSz > maxSz {
- return putsvc.ErrExceedingMaxSize
+ return target.ErrExceedingMaxSize
}
s.init = req
@@ -67,11 +75,11 @@ func (s *streamer) Send(req *object.PutRequest) (err error) {
// check payload size overflow
if s.writtenPayload > s.payloadSz {
- return putsvc.ErrWrongPayloadSize
+ return target.ErrWrongPayloadSize
}
}
- if err = s.stream.SendChunk(toChunkPrm(v)); err != nil {
+ if err = s.stream.SendChunk(ctx, toChunkPrm(v)); err != nil {
err = fmt.Errorf("(%T) could not send payload chunk: %w", s, err)
}
@@ -103,15 +111,18 @@ func (s *streamer) Send(req *object.PutRequest) (err error) {
return signature.SignServiceMessage(key, req)
}
-func (s *streamer) CloseAndRecv() (*object.PutResponse, error) {
+func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.CloseAndRecv")
+ defer span.End()
+
if s.saveChunks {
// check payload size correctness
if s.writtenPayload != s.payloadSz {
- return nil, putsvc.ErrWrongPayloadSize
+ return nil, target.ErrWrongPayloadSize
}
}
- resp, err := s.stream.Close()
+ resp, err := s.stream.Close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not object put stream: %w", s, err)
}
@@ -119,7 +130,10 @@ func (s *streamer) CloseAndRecv() (*object.PutResponse, error) {
return fromPutResponse(resp), nil
}
-func (s *streamer) relayRequest(info client.NodeInfo, c client.MultiAddressClient) error {
+func (s *streamer) relayRequest(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.relayRequest")
+ defer span.End()
+
// open stream
resp := new(object.PutResponse)
@@ -128,6 +142,12 @@ func (s *streamer) relayRequest(info client.NodeInfo, c client.MultiAddressClien
var firstErr error
info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.iterateAddress",
+ trace.WithAttributes(
+ attribute.String("address", addr.String()),
+ ))
+ defer span.End()
+
var err error
defer func() {
@@ -142,8 +162,8 @@ func (s *streamer) relayRequest(info client.NodeInfo, c client.MultiAddressClien
var stream *rpc.PutRequestWriter
- err = c.RawForAddress(addr, func(cli *rawclient.Client) error {
- stream, err = rpc.PutObject(cli, resp)
+ err = c.RawForAddress(ctx, addr, func(cli *rawclient.Client) error {
+ stream, err = rpc.PutObject(cli, resp, rawclient.WithContext(ctx))
return err
})
if err != nil {
diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go
index 790f061f18..5ec9ebe107 100644
--- a/pkg/services/object/put/v2/util.go
+++ b/pkg/services/object/put/v2/util.go
@@ -1,11 +1,11 @@
package putsvc
import (
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
func (s *streamer) toInitPrm(part *objectV2.PutObjectPartInit, req *objectV2.PutRequest) (*putsvc.PutInitPrm, error) {
@@ -21,10 +21,11 @@ func (s *streamer) toInitPrm(part *objectV2.PutObjectPartInit, req *objectV2.Put
return new(putsvc.PutInitPrm).
WithObject(
- object.NewFromV2(oV2),
+ objectSDK.NewFromV2(oV2),
).
WithRelay(s.relayRequest).
- WithCommonPrm(commonPrm), nil
+ WithCommonPrm(commonPrm).
+ WithCopyNumbers(part.GetCopiesNumber()), nil
}
func toChunkPrm(req *objectV2.PutObjectPartChunk) *putsvc.PutChunkPrm {
diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/put/validation.go
deleted file mode 100644
index 2d6ada5a19..0000000000
--- a/pkg/services/object/put/validation.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package putsvc
-
-import (
- "bytes"
- "crypto/sha256"
- "errors"
- "fmt"
- "hash"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
-)
-
-// validatingTarget validates object format and content.
-type validatingTarget struct {
- nextTarget transformer.ObjectTarget
-
- fmt *object.FormatValidator
-
- unpreparedObject bool
-
- hash hash.Hash
-
- checksum []byte
-
- maxPayloadSz uint64 // network config
-
- payloadSz uint64 // payload size of the streaming object from header
-
- writtenPayload uint64 // number of already written payload bytes
-}
-
-var (
- // ErrExceedingMaxSize is returned when payload size is greater than the limit.
- ErrExceedingMaxSize = errors.New("payload size is greater than the limit")
- // ErrWrongPayloadSize is returned when chunk payload size is greater than the length declared in header.
- ErrWrongPayloadSize = errors.New("wrong payload size")
-)
-
-func (t *validatingTarget) WriteHeader(obj *objectSDK.Object) error {
- t.payloadSz = obj.PayloadSize()
- chunkLn := uint64(len(obj.Payload()))
-
- if !t.unpreparedObject {
- // check chunk size
- if chunkLn > t.payloadSz {
- return ErrWrongPayloadSize
- }
-
- // check payload size limit
- if t.payloadSz > t.maxPayloadSz {
- return ErrExceedingMaxSize
- }
-
- cs, csSet := obj.PayloadChecksum()
- if !csSet {
- return errors.New("missing payload checksum")
- }
-
- switch typ := cs.Type(); typ {
- default:
- return fmt.Errorf("(%T) unsupported payload checksum type %v", t, typ)
- case checksum.SHA256:
- t.hash = sha256.New()
- case checksum.TZ:
- t.hash = tz.New()
- }
-
- t.checksum = cs.Value()
- }
-
- if err := t.fmt.Validate(obj, t.unpreparedObject); err != nil {
- return fmt.Errorf("(%T) coult not validate object format: %w", t, err)
- }
-
- err := t.nextTarget.WriteHeader(obj)
- if err != nil {
- return err
- }
-
- if !t.unpreparedObject {
- // update written bytes
- //
- // Note: we MUST NOT add obj.PayloadSize() since obj
- // can carry only the chunk of the full payload
- t.writtenPayload += chunkLn
- }
-
- return nil
-}
-
-func (t *validatingTarget) Write(p []byte) (n int, err error) {
- chunkLn := uint64(len(p))
-
- if !t.unpreparedObject {
- // check if new chunk will overflow payload size
- if t.writtenPayload+chunkLn > t.payloadSz {
- return 0, ErrWrongPayloadSize
- }
-
- _, err = t.hash.Write(p)
- if err != nil {
- return
- }
- }
-
- n, err = t.nextTarget.Write(p)
- if err == nil {
- t.writtenPayload += uint64(n)
- }
-
- return
-}
-
-func (t *validatingTarget) Close() (*transformer.AccessIdentifiers, error) {
- if !t.unpreparedObject {
- // check payload size correctness
- if t.payloadSz != t.writtenPayload {
- return nil, ErrWrongPayloadSize
- }
-
- if !bytes.Equal(t.hash.Sum(nil), t.checksum) {
- return nil, fmt.Errorf("(%T) incorrect payload checksum", t)
- }
- }
-
- return t.nextTarget.Close()
-}
diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go
new file mode 100644
index 0000000000..01eb1ea8da
--- /dev/null
+++ b/pkg/services/object/qos.go
@@ -0,0 +1,145 @@
+package object
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+)
+
+var _ ServiceServer = (*qosObjectService)(nil)
+
+type AdjustIOTag interface {
+ AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
+type qosObjectService struct {
+ next ServiceServer
+ adj AdjustIOTag
+}
+
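+// NewQoSObjectService returns a ServiceServer that adjusts the IO tag of every
+// incoming request based on its signer's public key before delegating to next.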
+func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer {
+ return &qosObjectService{
+ next: next,
+ adj: adjIOTag,
+ }
+}
+
+func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Delete(ctx, req)
+}
+
+func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Get(req, &qosReadStream[*object.GetResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.GetRangeHash(ctx, req)
+}
+
+func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Head(ctx, req)
+}
+
+func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ s, err := q.next.Patch(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{
+ s: s,
+ adj: q.adj,
+ }, nil
+}
+
+func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) {
+ s, err := q.next.Put(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &qosWriteStream[*object.PutRequest, *object.PutResponse]{
+ s: s,
+ adj: q.adj,
+ }, nil
+}
+
+func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.PutSingle(ctx, req)
+}
+
+func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error {
+ ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+ return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
+ ctxF: func() context.Context { return ctx },
+ sender: s,
+ })
+}
+
+type qosSend[T any] interface {
+ Send(T) error
+}
+
+type qosReadStream[T any] struct {
+ sender qosSend[T]
+ ctxF func() context.Context
+}
+
+func (g *qosReadStream[T]) Context() context.Context {
+ return g.ctxF()
+}
+
+func (g *qosReadStream[T]) Send(resp T) error {
+ return g.sender.Send(resp)
+}
+
+type qosVerificationHeader interface {
+ GetVerificationHeader() *session.RequestVerificationHeader
+}
+
+type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
+ Send(context.Context, TReq) error
+ CloseAndRecv(context.Context) (TResp, error)
+}
+
+type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
+ s qosSendRecv[TReq, TResp]
+ adj AdjustIOTag
+
+ ioTag string
+ ioTagDefined bool
+}
+
+func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
+ if q.ioTagDefined {
+ ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+ }
+ return q.s.CloseAndRecv(ctx)
+}
+
+func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
+ if !q.ioTagDefined {
+ ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+ q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
+ }
+ assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
+ ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+ return q.s.Send(ctx, req)
+}
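
qosWriteStream above resolves the IO tag lazily: the first Send derives it from the request's signature key and caches it, and every later message in the stream reuses the cached value. A dependency-free sketch of that caching behaviour, with stand-in types instead of the frostfs-qos tagging API:

```go
package main

import (
	"context"
	"fmt"
)

type tagKey struct{}

// contextWithIOTag and ioTagFromContext mimic the frostfs-qos tagging
// helpers used above; they are stand-ins, not the real API.
func contextWithIOTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, tagKey{}, tag)
}

func ioTagFromContext(ctx context.Context) (string, bool) {
	tag, ok := ctx.Value(tagKey{}).(string)
	return tag, ok
}

// writeStream caches the IO tag resolved on the first Send and reuses
// it for every later message, as qosWriteStream does.
type writeStream struct {
	resolve      func(ctx context.Context, signKey []byte) context.Context
	ioTag        string
	ioTagDefined bool
}

func (w *writeStream) Send(ctx context.Context, signKey []byte) {
	if !w.ioTagDefined {
		ctx = w.resolve(ctx, signKey)
		w.ioTag, w.ioTagDefined = ioTagFromContext(ctx)
	}
	ctx = contextWithIOTag(ctx, w.ioTag)
	tag, _ := ioTagFromContext(ctx)
	fmt.Println("sending with tag:", tag)
}

func main() {
	w := &writeStream{
		resolve: func(ctx context.Context, _ []byte) context.Context {
			return contextWithIOTag(ctx, "internal") // pretend the key maps to "internal"
		},
	}
	w.Send(context.Background(), []byte("signer public key"))
	w.Send(context.Background(), nil) // tag already cached; resolver is skipped
}
```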
diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go
new file mode 100644
index 0000000000..bc6ffd1601
--- /dev/null
+++ b/pkg/services/object/remote_reader.go
@@ -0,0 +1,141 @@
+package object
+
+import (
+ "context"
+ "fmt"
+
+ clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type ClientConstructor interface {
+ Get(clientcore.NodeInfo) (clientcore.MultiAddressClient, error)
+}
+
+// RemoteReader represents a utility for getting
+// objects from a remote host.
+type RemoteReader struct {
+ keyStorage *util.KeyStorage
+
+ clientCache ClientConstructor
+}
+
+// RemoteRequestPrm groups remote operation parameters.
+type RemoteRequestPrm struct {
+ addr oid.Address
+ raw bool
+ node netmap.NodeInfo
+}
+
+const remoteOpTTL = 1
+
+// NewRemoteReader creates, initializes and returns a new RemoteReader instance.
+func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader {
+ return &RemoteReader{
+ keyStorage: keyStorage,
+ clientCache: cache,
+ }
+}
+
+// WithNodeInfo sets information about the remote node.
+func (p *RemoteRequestPrm) WithNodeInfo(v netmap.NodeInfo) *RemoteRequestPrm {
+ if p != nil {
+ p.node = v
+ }
+
+ return p
+}
+
+// WithObjectAddress sets object address.
+func (p *RemoteRequestPrm) WithObjectAddress(v oid.Address) *RemoteRequestPrm {
+ if p != nil {
+ p.addr = v
+ }
+
+ return p
+}
+
+func (p *RemoteRequestPrm) WithRaw(v bool) *RemoteRequestPrm {
+ if p != nil {
+ p.raw = v
+ }
+ return p
+}
+
+// Head requests object header from the remote node.
+func (h *RemoteReader) Head(ctx context.Context, prm *RemoteRequestPrm) (*objectSDK.Object, error) {
+ key, err := h.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, fmt.Errorf("(%T) could not receive private key: %w", h, err)
+ }
+
+ var info clientcore.NodeInfo
+
+ err = clientcore.NodeInfoFromRawNetmapElement(&info, netmapCore.Node(prm.node))
+ if err != nil {
+ return nil, fmt.Errorf("parse client node info: %w", err)
+ }
+
+ c, err := h.clientCache.Get(info)
+ if err != nil {
+ return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", h, info.AddressGroup(), err)
+ }
+
+ var headPrm internalclient.HeadObjectPrm
+
+ headPrm.SetClient(c)
+ headPrm.SetPrivateKey(key)
+ headPrm.SetAddress(prm.addr)
+ headPrm.SetTTL(remoteOpTTL)
+ if prm.raw {
+ headPrm.SetRawFlag()
+ }
+
+ res, err := internalclient.HeadObject(ctx, headPrm)
+ if err != nil {
+ return nil, fmt.Errorf("(%T) could not head object in %s: %w", h, info.AddressGroup(), err)
+ }
+
+ return res.Header(), nil
+}
+
+func (h *RemoteReader) Get(ctx context.Context, prm *RemoteRequestPrm) (*objectSDK.Object, error) {
+ key, err := h.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, fmt.Errorf("(%T) could not receive private key: %w", h, err)
+ }
+
+ var info clientcore.NodeInfo
+
+ err = clientcore.NodeInfoFromRawNetmapElement(&info, netmapCore.Node(prm.node))
+ if err != nil {
+ return nil, fmt.Errorf("parse client node info: %w", err)
+ }
+
+ c, err := h.clientCache.Get(info)
+ if err != nil {
+ return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", h, info.AddressGroup(), err)
+ }
+
+ var getPrm internalclient.GetObjectPrm
+
+ getPrm.SetClient(c)
+ getPrm.SetPrivateKey(key)
+ getPrm.SetAddress(prm.addr)
+ getPrm.SetTTL(remoteOpTTL)
+ if prm.raw {
+ getPrm.SetRawFlag()
+ }
+
+ res, err := internalclient.GetObject(ctx, getPrm)
+ if err != nil {
+ return nil, fmt.Errorf("(%T) could not head object in %s: %w", h, info.AddressGroup(), err)
+ }
+
+ return res.Object(), nil
+}
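
RemoteRequestPrm's setters check the receiver for nil before assigning, so a fluent chain never panics even when construction failed upstream. A minimal illustration of the same pattern with hypothetical types:

```go
package main

import "fmt"

// prm mirrors the nil-receiver-safe fluent setters of RemoteRequestPrm:
// chaining never panics, even when the starting pointer is nil.
type prm struct {
	addr string
	raw  bool
}

func (p *prm) WithAddress(a string) *prm {
	if p != nil {
		p.addr = a
	}
	return p
}

func (p *prm) WithRaw(v bool) *prm {
	if p != nil {
		p.raw = v
	}
	return p
}

func main() {
	p := new(prm).WithAddress("node-1").WithRaw(true)
	fmt.Printf("%+v\n", *p) // {addr:node-1 raw:true}

	var nilPrm *prm
	nilPrm = nilPrm.WithAddress("ignored") // no panic, stays nil
	fmt.Println(nilPrm == nil)             // true
}
```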
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index 4da2b23a7b..80c971e8f7 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -4,9 +4,8 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type ResponseService struct {
@@ -16,25 +15,31 @@ type ResponseService struct {
}
type searchStreamResponser struct {
- util.ServerStream
+ SearchStream
- respWriter util.ResponseMessageWriter
+ respSvc *response.Service
}
type getStreamResponser struct {
- util.ServerStream
+ GetObjectStream
- respWriter util.ResponseMessageWriter
+ respSvc *response.Service
}
type getRangeStreamResponser struct {
- util.ServerStream
+ GetObjectRangeStream
- respWriter util.ResponseMessageWriter
+ respSvc *response.Service
}
type putStreamResponser struct {
- stream *response.ClientMessageStreamer
+ stream PutObjectStream
+ respSvc *response.Service
+}
+
+type patchStreamResponser struct {
+ stream PatchObjectStream
+ respSvc *response.Service
}
// NewResponseService returns object service instance that passes internal service
@@ -47,29 +52,32 @@ func NewResponseService(objSvc ServiceServer, respSvc *response.Service) *Respon
}
func (s *getStreamResponser) Send(resp *object.GetResponse) error {
- return s.respWriter(resp)
+ s.respSvc.SetMeta(resp)
+ return s.GetObjectStream.Send(resp)
}
func (s *ResponseService) Get(req *object.GetRequest, stream GetObjectStream) error {
return s.svc.Get(req, &getStreamResponser{
- ServerStream: stream,
- respWriter: s.respSvc.HandleServerStreamRequest(func(resp util.ResponseMessage) error {
- return stream.Send(resp.(*object.GetResponse))
- }),
+ GetObjectStream: stream,
+ respSvc: s.respSvc,
})
}
-func (s *putStreamResponser) Send(req *object.PutRequest) error {
- return s.stream.Send(req)
+func (s *putStreamResponser) Send(ctx context.Context, req *object.PutRequest) error {
+ if err := s.stream.Send(ctx, req); err != nil {
+ return fmt.Errorf("could not send the request: %w", err)
+ }
+ return nil
}
-func (s *putStreamResponser) CloseAndRecv() (*object.PutResponse, error) {
- r, err := s.stream.CloseAndRecv()
+func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ r, err := s.stream.CloseAndRecv(ctx)
if err != nil {
- return nil, fmt.Errorf("(%T) could not receive response: %w", s, err)
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
}
- return r.(*object.PutResponse), nil
+ s.respSvc.SetMeta(r)
+ return r, nil
}
func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
@@ -79,78 +87,100 @@ func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
}
return &putStreamResponser{
- stream: s.respSvc.CreateRequestStreamer(
- func(req any) error {
- return stream.Send(req.(*object.PutRequest))
- },
- func() (util.ResponseMessage, error) {
- return stream.CloseAndRecv()
- },
- ),
+ stream: stream,
+ respSvc: s.respSvc,
}, nil
}
-func (s *ResponseService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Head(ctx, req.(*object.HeadRequest))
- },
- )
+func (s *patchStreamResponser) Send(ctx context.Context, req *object.PatchRequest) error {
+ if err := s.stream.Send(ctx, req); err != nil {
+ return fmt.Errorf("could not send the request: %w", err)
+ }
+ return nil
+}
+
+func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
+ r, err := s.stream.CloseAndRecv(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
+ }
+
+ s.respSvc.SetMeta(r)
+ return r, nil
+}
+
+func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ stream, err := s.svc.Patch(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("could not create Put object streamer: %w", err)
+ }
+
+ return &patchStreamResponser{
+ stream: stream,
+ respSvc: s.respSvc,
+ }, nil
+}
+
+func (s *ResponseService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ resp, err := s.svc.PutSingle(ctx, req)
if err != nil {
return nil, err
}
- return resp.(*object.HeadResponse), nil
+ s.respSvc.SetMeta(resp)
+ return resp, nil
+}
+
+func (s *ResponseService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
+ resp, err := s.svc.Head(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ s.respSvc.SetMeta(resp)
+ return resp, nil
}
func (s *searchStreamResponser) Send(resp *object.SearchResponse) error {
- return s.respWriter(resp)
+ s.respSvc.SetMeta(resp)
+ return s.SearchStream.Send(resp)
}
func (s *ResponseService) Search(req *object.SearchRequest, stream SearchStream) error {
return s.svc.Search(req, &searchStreamResponser{
- ServerStream: stream,
- respWriter: s.respSvc.HandleServerStreamRequest(func(resp util.ResponseMessage) error {
- return stream.Send(resp.(*object.SearchResponse))
- }),
+ SearchStream: stream,
+ respSvc: s.respSvc,
})
}
func (s *ResponseService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Delete(ctx, req.(*object.DeleteRequest))
- },
- )
+ resp, err := s.svc.Delete(ctx, req)
if err != nil {
return nil, err
}
- return resp.(*object.DeleteResponse), nil
+ s.respSvc.SetMeta(resp)
+ return resp, nil
}
func (s *getRangeStreamResponser) Send(resp *object.GetRangeResponse) error {
- return s.respWriter(resp)
+ s.respSvc.SetMeta(resp)
+ return s.GetObjectRangeStream.Send(resp)
}
func (s *ResponseService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error {
return s.svc.GetRange(req, &getRangeStreamResponser{
- ServerStream: stream,
- respWriter: s.respSvc.HandleServerStreamRequest(func(resp util.ResponseMessage) error {
- return stream.Send(resp.(*object.GetRangeResponse))
- }),
+ GetObjectRangeStream: stream,
+ respSvc: s.respSvc,
})
}
func (s *ResponseService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.GetRangeHash(ctx, req.(*object.GetRangeHashRequest))
- },
- )
+ resp, err := s.svc.GetRangeHash(ctx, req)
if err != nil {
return nil, err
}
- return resp.(*object.GetRangeHashResponse), nil
+ s.respSvc.SetMeta(resp)
+ return resp, nil
}
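
The rewritten ResponseService drops the HandleUnaryRequest/HandleServerStreamRequest indirection and its type assertions in favour of typed decorators that stamp metadata with SetMeta and delegate to the embedded stream. A self-contained sketch of that decorator shape (response, stream and metaSvc are stand-ins, not the frostfs types):

```go
package main

import "fmt"

// response, stream and metaSvc are stand-ins for the frostfs types.
type response struct {
	body string
	meta string
}

type stream interface {
	Send(*response) error
}

// metaSvc plays the role of response.Service: SetMeta stamps every
// outgoing message with node metadata.
type metaSvc struct{ epoch uint64 }

func (m *metaSvc) SetMeta(r *response) { r.meta = fmt.Sprintf("epoch=%d", m.epoch) }

// responser decorates the downstream stream the same way
// getStreamResponser and searchStreamResponser do above.
type responser struct {
	stream
	respSvc *metaSvc
}

func (r *responser) Send(resp *response) error {
	r.respSvc.SetMeta(resp)
	return r.stream.Send(resp)
}

type printStream struct{}

func (printStream) Send(r *response) error {
	fmt.Printf("%s (%s)\n", r.body, r.meta)
	return nil
}

func main() {
	s := &responser{stream: printStream{}, respSvc: &metaSvc{epoch: 42}}
	_ = s.Send(&response{body: "chunk-1"}) // chunk-1 (epoch=42)
}
```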
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index fffcba42ac..60d469b11d 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -3,32 +3,29 @@ package searchsvc
import (
"context"
"encoding/hex"
+ "fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
)
-func (exec *execCtx) executeOnContainer() {
- if exec.isLocal() {
- exec.log.Debug("return result directly")
- return
- }
-
+func (exec *execCtx) executeOnContainer(ctx context.Context) error {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug("trying to execute in container...",
+ exec.log.Debug(ctx, logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- ok := exec.initEpoch()
- if !ok {
- return
+ if err := exec.initEpoch(ctx); err != nil {
+ return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
}
for {
- if exec.processCurrentEpoch() {
+ if err := exec.processCurrentEpoch(ctx); err != nil {
break
}
@@ -43,27 +40,26 @@ func (exec *execCtx) executeOnContainer() {
exec.curProcEpoch--
}
- exec.status = statusOK
- exec.err = nil
+ return nil
}
-func (exec *execCtx) processCurrentEpoch() bool {
- exec.log.Debug("process epoch",
+func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
+ exec.log.Debug(ctx, logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
- traverser, ok := exec.generateTraverser(exec.containerID())
- if !ok {
- return true
+ traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch)
+ if err != nil {
+ return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
}
- ctx, cancel := context.WithCancel(exec.context())
+ ctx, cancel := context.WithCancel(ctx)
defer cancel()
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug("no more nodes, abort placement iteration")
+ exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
break
}
@@ -76,8 +72,8 @@ func (exec *execCtx) processCurrentEpoch() bool {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug("interrupt placement iteration by context",
- zap.String("error", ctx.Err().Error()))
+ exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
+ zap.Error(ctx.Err()))
return
default:
}
@@ -86,35 +82,43 @@ func (exec *execCtx) processCurrentEpoch() bool {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug("processing node...", zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
- mtx.Lock()
- exec.status = statusUndefined
- exec.err = err
- mtx.Unlock()
-
- exec.log.Debug("could not construct remote node client")
+ exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err))
return
}
- ids, err := c.searchObjects(exec, info)
+ ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
- exec.log.Debug("remote operation failed",
- zap.String("error", err.Error()))
+ exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
+ zap.Error(err))
return
}
mtx.Lock()
- exec.writeIDList(ids)
+ err = exec.writeIDList(ids)
mtx.Unlock()
+ if err != nil {
+ exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err))
+ return
+ }
}(i)
}
wg.Wait()
}
- return false
+ return nil
+}
+
+func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) {
+ cnrID := exec.containerID()
+ cnr, err := exec.svc.containerSource.Get(ctx, cnrID)
+ if err != nil {
+ return containerSDK.Container{}, err
+ }
+ return cnr.Value, nil
}
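
processCurrentEpoch above fans out one goroutine per node of each placement vector, serializes result writes behind a mutex, treats per-node failures as non-fatal, and honours context cancellation. The skeleton of that loop, reduced to standard-library types (searchNode and the string IDs are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// searchNode stands in for the per-node searchObjects call.
func searchNode(_ context.Context, node string) ([]string, error) {
	return []string{node + "/obj-1", node + "/obj-2"}, nil
}

// processVector mirrors the placement loop: one goroutine per node,
// writes serialized by a mutex, early exit on canceled context, and
// per-node failures logged rather than propagated.
func processVector(ctx context.Context, nodes []string, write func([]string) error) {
	var (
		wg  sync.WaitGroup
		mtx sync.Mutex
	)
	for i := range nodes {
		wg.Add(1)
		go func(node string) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return // interrupt placement iteration by context
			default:
			}
			ids, err := searchNode(ctx, node)
			if err != nil {
				return // a single node failing does not fail the search
			}
			mtx.Lock()
			err = write(ids)
			mtx.Unlock()
			if err != nil {
				fmt.Println("could not write object identifiers:", err)
			}
		}(nodes[i])
	}
	wg.Wait()
}

func main() {
	var all []string
	processVector(context.Background(), []string{"node-a", "node-b"}, func(ids []string) error {
		all = append(all, ids...) // already serialized by processVector's mutex
		return nil
	})
	fmt.Println(all)
}
```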
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index 18700b2541..ced51ecce2 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -3,59 +3,34 @@ package searchsvc
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
-type statusError struct {
- status int
- err error
-}
-
type execCtx struct {
svc *Service
- ctx context.Context
-
prm Prm
- statusError
-
log *logger.Logger
curProcEpoch uint64
}
-const (
- statusUndefined int = iota
- statusOK
-)
-
-func (exec *execCtx) prepare() {
- if _, ok := exec.prm.writer.(*uniqueIDWriter); !ok {
- exec.prm.writer = newUniqueAddressWriter(exec.prm.writer)
- }
-}
-
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = &logger.Logger{Logger: l.With(
+ exec.log = l.With(
zap.String("request", "SEARCH"),
zap.Stringer("container", exec.containerID()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )}
+ )
}
-func (exec execCtx) context() context.Context {
- return exec.ctx
-}
-
-func (exec execCtx) isLocal() bool {
+func (exec *execCtx) isLocal() bool {
return exec.prm.common.LocalOnly()
}
@@ -63,7 +38,7 @@ func (exec *execCtx) containerID() cid.ID {
return exec.prm.cnr
}
-func (exec *execCtx) searchFilters() object.SearchFilters {
+func (exec *execCtx) searchFilters() objectSDK.SearchFilters {
return exec.prm.filters
}
@@ -75,61 +50,36 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
return exec.prm.common.NetmapLookupDepth()
}
-func (exec *execCtx) initEpoch() bool {
+func (exec *execCtx) initEpoch(ctx context.Context) error {
exec.curProcEpoch = exec.netmapEpoch()
if exec.curProcEpoch > 0 {
- return true
+ return nil
}
- e, err := exec.svc.currentEpochReceiver.currentEpoch()
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not get current epoch number",
- zap.String("error", err.Error()),
- )
-
- return false
- case err == nil:
- exec.curProcEpoch = e
- return true
+ e, err := exec.svc.currentEpochReceiver.Epoch(ctx)
+ if err != nil {
+ return err
}
+
+ exec.curProcEpoch = e
+ return nil
}
-func (exec *execCtx) generateTraverser(cnr cid.ID) (*placement.Traverser, bool) {
- t, err := exec.svc.traverserGenerator.generateTraverser(cnr, exec.curProcEpoch)
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not generate container traverser",
- zap.String("error", err.Error()),
- )
-
- return nil, false
- case err == nil:
- return t, true
- }
+func (exec *execCtx) writeIDList(ids []oid.ID) error {
+ ids = exec.filterAllowedObjectIDs(ids)
+ return exec.prm.writer.WriteIDs(ids)
}
-func (exec *execCtx) writeIDList(ids []oid.ID) {
- err := exec.prm.writer.WriteIDs(ids)
-
- switch {
- default:
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("could not write object identifiers",
- zap.String("error", err.Error()),
- )
- case err == nil:
- exec.status = statusOK
- exec.err = nil
+func (exec *execCtx) filterAllowedObjectIDs(objIDs []oid.ID) []oid.ID {
+ sessionToken := exec.prm.common.SessionToken()
+ if sessionToken == nil {
+ return objIDs
}
+ result := make([]oid.ID, 0, len(objIDs))
+ for _, objID := range objIDs {
+ if sessionToken.AssertObject(objID) {
+ result = append(result, objID)
+ }
+ }
+ return result
}
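
filterAllowedObjectIDs keeps only the IDs the session token asserts; with no token, the list passes through untouched. The same logic with the token modelled as a plain allow-set (an assumption for illustration, not the sessionsdk API):

```go
package main

import "fmt"

// filterAllowed mirrors filterAllowedObjectIDs: with no session token
// every ID passes; with one, only IDs the token asserts survive.
func filterAllowed(ids []string, allowed map[string]struct{}) []string {
	if allowed == nil { // no session token — nothing to filter
		return ids
	}
	result := make([]string, 0, len(ids))
	for _, id := range ids {
		if _, ok := allowed[id]; ok {
			result = append(result, id)
		}
	}
	return result
}

func main() {
	allowed := map[string]struct{}{"obj-1": {}, "obj-3": {}}
	fmt.Println(filterAllowed([]string{"obj-1", "obj-2", "obj-3"}, allowed)) // [obj-1 obj-3]
	fmt.Println(filterAllowed([]string{"obj-1", "obj-2"}, nil))              // [obj-1 obj-2]
}
```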
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index 1e47769212..ec65ab06a2 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -1,22 +1,23 @@
package searchsvc
import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
-func (exec *execCtx) executeLocal() {
- ids, err := exec.svc.localStorage.search(exec)
-
+func (exec *execCtx) executeLocal(ctx context.Context) error {
+ ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
- exec.status = statusUndefined
- exec.err = err
-
- exec.log.Debug("local operation failed",
- zap.String("error", err.Error()),
- )
-
- return
+ exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err))
+ return err
}
- exec.writeIDList(ids)
+ if err := exec.writeIDList(ids); err != nil {
+ return fmt.Errorf("%s: %w", logs.SearchCouldNotWriteObjectIdentifiers, err)
+ }
+
+ return nil
}
diff --git a/pkg/services/object/search/prm.go b/pkg/services/object/search/prm.go
index c80257bd3e..95fe82e2fd 100644
--- a/pkg/services/object/search/prm.go
+++ b/pkg/services/object/search/prm.go
@@ -1,22 +1,24 @@
package searchsvc
import (
+ "context"
+
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
// Prm groups parameters of Get service call.
type Prm struct {
- writer IDListWriter
+ writer *uniqueIDWriter
common *util.CommonPrm
cnr cid.ID
- filters object.SearchFilters
+ filters objectSDK.SearchFilters
forwarder RequestForwarder
}
@@ -29,7 +31,7 @@ type IDListWriter interface {
// RequestForwarder is a callback for forwarding of the
// original Search requests.
-type RequestForwarder func(coreclient.NodeInfo, coreclient.MultiAddressClient) ([]oid.ID, error)
+type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) ([]oid.ID, error)
// SetCommonParameters sets common parameters of the operation.
func (p *Prm) SetCommonParameters(common *util.CommonPrm) {
@@ -38,7 +40,7 @@ func (p *Prm) SetCommonParameters(common *util.CommonPrm) {
// SetWriter sets target component to write list of object identifiers.
func (p *Prm) SetWriter(w IDListWriter) {
- p.writer = w
+ p.writer = newUniqueAddressWriter(w)
}
// SetRequestForwarder sets callback for forwarding
@@ -53,6 +55,6 @@ func (p *Prm) WithContainerID(id cid.ID) {
}
// WithSearchFilters sets search filters.
-func (p *Prm) WithSearchFilters(fs object.SearchFilters) {
+func (p *Prm) WithSearchFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 5e1249a1b9..76c091f85a 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -3,6 +3,7 @@ package searchsvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
@@ -10,41 +11,35 @@ import (
func (s *Service) Search(ctx context.Context, prm Prm) error {
exec := &execCtx{
svc: s,
- ctx: ctx,
prm: prm,
}
- exec.prepare()
-
exec.setLogger(s.log)
- exec.execute()
-
- return exec.statusError.err
+ return exec.execute(ctx)
}
-func (exec *execCtx) execute() {
- exec.log.Debug("serving request...")
+func (exec *execCtx) execute(ctx context.Context) error {
+ exec.log.Debug(ctx, logs.ServingRequest)
- // perform local operation
- exec.executeLocal()
+ err := exec.executeLocal(ctx)
+ exec.logResult(ctx, err)
- exec.analyzeStatus(true)
+ if exec.isLocal() {
+ exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
+ return err
+ }
+
+ err = exec.executeOnContainer(ctx)
+ exec.logResult(ctx, err)
+ return err
}
-func (exec *execCtx) analyzeStatus(execCnr bool) {
- // analyze local result
- switch exec.status {
+func (exec *execCtx) logResult(ctx context.Context, err error) {
+ switch {
default:
- exec.log.Debug("operation finished with error",
- zap.String("error", exec.err.Error()),
- )
- case statusOK:
- exec.log.Debug("operation finished successfully")
- }
-
- if execCnr {
- exec.executeOnContainer()
- exec.analyzeStatus(false)
+ exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ case err == nil:
+ exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
}
}
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index bb3d422b30..918ad421f8 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -6,20 +6,28 @@ import (
"crypto/sha256"
"errors"
"fmt"
+ "slices"
"strconv"
"testing"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "github.com/google/uuid"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
@@ -51,10 +59,18 @@ type simpleIDWriter struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) currentEpoch() (uint64, error) {
+func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
return uint64(e), nil
}
+type errIDWriter struct {
+ err error
+}
+
+func (e errIDWriter) WriteIDs(ids []oid.ID) error {
+ return e.err
+}
+
func (s *simpleIDWriter) WriteIDs(ids []oid.ID) error {
s.ids = append(s.ids, ids...)
return nil
@@ -66,15 +82,16 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) generateTraverser(_ cid.ID, epoch uint64) (*placement.Traverser, error) {
- return placement.NewTraverser(
+func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
+ t, err := placement.NewTraverser(context.Background(),
placement.ForContainer(g.c),
placement.UseBuilder(g.b[epoch]),
placement.WithoutSuccessTracking(),
)
+ return t, &containerCore.Container{Value: g.c}, err
}
-func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -87,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.
return nil, errors.New("vectors for address not found")
}
- res := make([][]netmap.NodeInfo, len(vs))
- copy(res, vs)
+ res := slices.Clone(vs)
return res, nil
}
@@ -102,7 +118,7 @@ func (c *testClientCache) get(info clientcore.NodeInfo) (searchClient, error) {
return v, nil
}
-func (s *testStorage) search(exec *execCtx) ([]oid.ID, error) {
+func (s *testStorage) search(_ context.Context, exec *execCtx) ([]oid.ID, error) {
v, ok := s.items[exec.containerID().EncodeToString()]
if !ok {
return nil, nil
@@ -111,7 +127,7 @@ func (s *testStorage) search(exec *execCtx) ([]oid.ID, error) {
return v.ids, v.err
}
-func (c *testStorage) searchObjects(exec *execCtx, _ clientcore.NodeInfo) ([]oid.ID, error) {
+func (c *testStorage) searchObjects(_ context.Context, exec *execCtx, _ clientcore.NodeInfo) ([]oid.ID, error) {
v, ok := c.items[exec.containerID().EncodeToString()]
if !ok {
return nil, nil
@@ -135,7 +151,7 @@ func testSHA256() (cs [sha256.Size]byte) {
func generateIDs(num int) []oid.ID {
res := make([]oid.ID, num)
- for i := 0; i < num; i++ {
+ for i := range num {
res[i].SetSHA256(testSHA256())
}
@@ -147,7 +163,7 @@ func TestGetLocalOnly(t *testing.T) {
newSvc := func(storage *testStorage) *Service {
svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(false)
+ svc.log = test.NewLogger(t)
svc.localStorage = storage
return svc
@@ -189,6 +205,20 @@ func TestGetLocalOnly(t *testing.T) {
w := new(simpleIDWriter)
p := newPrm(cnr, w)
+ err := svc.Search(ctx, p)
+ require.ErrorIs(t, err, testErr)
+ })
+ t.Run("FAIL while writing ID", func(t *testing.T) {
+ storage := newTestStorage()
+ svc := newSvc(storage)
+
+ cnr := cidtest.ID()
+ storage.addResult(cnr, []oid.ID{oidtest.ID()}, nil)
+
+ testErr := errors.New("any error")
+ w := errIDWriter{testErr}
+ p := newPrm(cnr, w)
+
err := svc.Search(ctx, p)
require.ErrorIs(t, err, testErr)
})
@@ -202,7 +232,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := 0; j < dim[i]; j++ {
+ for j := range dim[i] {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
@@ -249,7 +279,7 @@ func TestGetRemoteSmall(t *testing.T) {
newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(false)
+ svc.log = test.NewLogger(t)
svc.localStorage = newTestStorage()
const curEpoch = 13
@@ -275,33 +305,34 @@ func TestGetRemoteSmall(t *testing.T) {
return p
}
+ var addr oid.Address
+ addr.SetContainer(id)
+
+ ns, as := testNodeMatrix(t, placementDim)
+
+ builder := &testPlacementBuilder{
+ vectors: map[string][][]netmap.NodeInfo{
+ addr.EncodeToString(): ns,
+ },
+ }
+
+ c1 := newTestStorage()
+ ids1 := generateIDs(10)
+
+ c2 := newTestStorage()
+ ids2 := generateIDs(10)
+
+ svc := newSvc(builder, &testClientCache{
+ clients: map[string]*testStorage{
+ as[0][0]: c1,
+ as[0][1]: c2,
+ },
+ })
+
t.Run("OK", func(t *testing.T) {
- var addr oid.Address
- addr.SetContainer(id)
-
- ns, as := testNodeMatrix(t, placementDim)
-
- builder := &testPlacementBuilder{
- vectors: map[string][][]netmap.NodeInfo{
- addr.EncodeToString(): ns,
- },
- }
-
- c1 := newTestStorage()
- ids1 := generateIDs(10)
c1.addResult(id, ids1, nil)
-
- c2 := newTestStorage()
- ids2 := generateIDs(10)
c2.addResult(id, ids2, nil)
- svc := newSvc(builder, &testClientCache{
- clients: map[string]*testStorage{
- as[0][0]: c1,
- as[0][1]: c2,
- },
- })
-
w := new(simpleIDWriter)
p := newPrm(id, w)
@@ -314,6 +345,49 @@ func TestGetRemoteSmall(t *testing.T) {
require.Contains(t, w.ids, id)
}
})
+ t.Run("non-local fail is not a FAIL", func(t *testing.T) {
+ testErr := errors.New("opaque")
+
+ c1.addResult(id, ids1, nil)
+ c2.addResult(id, nil, testErr)
+
+ w := new(simpleIDWriter)
+ p := newPrm(id, w)
+
+ err := svc.Search(ctx, p)
+ require.NoError(t, err)
+ require.Equal(t, ids1, w.ids)
+ })
+ t.Run("client init fail is not a FAIL", func(t *testing.T) {
+ svc := newSvc(builder, &testClientCache{
+ clients: map[string]*testStorage{
+ as[0][0]: c1,
+ },
+ })
+ c1.addResult(id, ids1, nil)
+ c2.addResult(id, ids2, nil)
+
+ w := new(simpleIDWriter)
+ p := newPrm(id, w)
+
+ err := svc.Search(ctx, p)
+ require.NoError(t, err)
+ require.Equal(t, ids1, w.ids)
+ })
+ t.Run("context is respected", func(t *testing.T) {
+ c1.addResult(id, ids1, nil)
+ c2.addResult(id, ids2, nil)
+
+ w := new(simpleIDWriter)
+ p := newPrm(id, w)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ err := svc.Search(ctx, p)
+ require.NoError(t, err)
+ require.Empty(t, w.ids)
+ })
}
func TestGetFromPastEpoch(t *testing.T) {
@@ -358,7 +432,7 @@ func TestGetFromPastEpoch(t *testing.T) {
c22.addResult(idCnr, ids22, nil)
svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(false)
+ svc.log = test.NewLogger(t)
svc.localStorage = newTestStorage()
const curEpoch = 13
@@ -425,3 +499,128 @@ func TestGetFromPastEpoch(t *testing.T) {
require.NoError(t, err)
assertContains(ids11, ids12, ids21, ids22)
}
+
+func TestGetWithSessionToken(t *testing.T) {
+ ctx := context.Background()
+
+ placementDim := []int{2}
+
+ rs := make([]netmap.ReplicaDescriptor, len(placementDim))
+ for i := range placementDim {
+ rs[i].SetNumberOfObjects(uint32(placementDim[i]))
+ }
+
+ var pp netmap.PlacementPolicy
+ pp.AddReplicas(rs...)
+
+ var cnr container.Container
+ cnr.SetPlacementPolicy(pp)
+
+ var id cid.ID
+ container.CalculateID(&id, cnr)
+
+ var addr oid.Address
+ addr.SetContainer(id)
+
+ ns, as := testNodeMatrix(t, placementDim)
+
+ builder := &testPlacementBuilder{
+ vectors: map[string][][]netmap.NodeInfo{
+ addr.EncodeToString(): ns,
+ },
+ }
+
+ localStorage := newTestStorage()
+ localIDs := generateIDs(10)
+ localStorage.addResult(id, localIDs, nil)
+
+ c1 := newTestStorage()
+ ids1 := generateIDs(10)
+ c1.addResult(id, ids1, nil)
+
+ c2 := newTestStorage()
+ ids2 := generateIDs(10)
+ c2.addResult(id, ids2, nil)
+
+ w := new(simpleIDWriter)
+
+ svc := &Service{cfg: new(cfg)}
+ svc.log = test.NewLogger(t)
+ svc.localStorage = localStorage
+
+ const curEpoch = 13
+
+ svc.traverserGenerator = &testTraverserGenerator{
+ c: cnr,
+ b: map[uint64]placement.Builder{
+ curEpoch: builder,
+ },
+ }
+ svc.clientConstructor = &testClientCache{
+ clients: map[string]*testStorage{
+ as[0][0]: c1,
+ as[0][1]: c2,
+ },
+ }
+
+ svc.currentEpochReceiver = testEpochReceiver(curEpoch)
+
+ metaStub := &metaStub{
+ TTL: 5,
+ LimitByObjectIDs: append(append(localIDs[:5], ids1[:5]...), ids2[:5]...),
+ T: t,
+ Exp: 20,
+ ContainerID: id,
+ }
+
+ p := Prm{}
+ p.WithContainerID(id)
+ p.SetWriter(w)
+ var err error
+ p.common, err = util.CommonPrmFromV2(metaStub)
+ require.NoError(t, err)
+
+ err = svc.Search(ctx, p)
+ require.NoError(t, err)
+ require.Len(t, w.ids, 15)
+
+ for _, id := range metaStub.LimitByObjectIDs {
+ require.Contains(t, w.ids, id)
+ }
+}
+
+type metaStub struct {
+ TTL uint32
+ Exp uint64
+ LimitByObjectIDs []oid.ID
+ T *testing.T
+ ContainerID cid.ID
+}
+
+func (m *metaStub) GetMetaHeader() *session.RequestMetaHeader {
+ var result session.RequestMetaHeader
+ result.SetTTL(m.TTL)
+
+ tokenObj := new(sessionsdk.Object)
+ tokenObj.ForVerb(sessionsdk.VerbObjectSearch)
+ tokenObj.LimitByObjects(m.LimitByObjectIDs...)
+ tokenObj.SetID(uuid.New())
+ tokenObj.SetExp(m.Exp)
+ tokenObj.BindContainer(m.ContainerID)
+
+ pubKey := &frostfsecdsa.PublicKey{}
+
+ tokenObj.SetAuthKey(pubKey)
+
+ priv, err := keys.NewPrivateKey()
+ require.NoError(m.T, err)
+
+ require.NoError(m.T, tokenObj.Sign(priv.PrivateKey))
+
+ var token session.Token
+ tokenObj.WriteToV2(&token)
+
+ result.SetSessionToken(&token)
+
+ return &result
+}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index aebcfca0fd..56fe56468f 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -1,7 +1,10 @@
package searchsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -24,7 +27,7 @@ type Option func(*cfg)
type searchClient interface {
// searchObjects searches objects on the specified node.
// MUST NOT modify execCtx as it can be accessed concurrently.
- searchObjects(*execCtx, client.NodeInfo) ([]oid.ID, error)
+ searchObjects(context.Context, *execCtx, client.NodeInfo) ([]oid.ID, error)
}
type ClientConstructor interface {
@@ -35,7 +38,7 @@ type cfg struct {
log *logger.Logger
localStorage interface {
- search(*execCtx) ([]oid.ID, error)
+ search(context.Context, *execCtx) ([]oid.ID, error)
}
clientConstructor interface {
@@ -43,27 +46,41 @@ type cfg struct {
}
traverserGenerator interface {
- generateTraverser(cid.ID, uint64) (*placement.Traverser, error)
+ GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
}
currentEpochReceiver interface {
- currentEpoch() (uint64, error)
+ Epoch(ctx context.Context) (uint64, error)
}
keyStore *util.KeyStorage
-}
-func defaultCfg() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- clientConstructor: new(clientConstructorWrapper),
- }
+ containerSource container.Source
}
// New creates, initializes and returns utility serving
// Object.Get service requests.
-func New(opts ...Option) *Service {
- c := defaultCfg()
+func New(e *engine.StorageEngine,
+ cc ClientConstructor,
+ tg *util.TraverserGenerator,
+ ns netmap.Source,
+ ks *util.KeyStorage,
+ cs container.Source,
+ opts ...Option,
+) *Service {
+ c := &cfg{
+ log: logger.NewLoggerWrapper(zap.L()),
+ clientConstructor: &clientConstructorWrapper{
+ constructor: cc,
+ },
+ localStorage: &storageEngineWrapper{
+ storage: e,
+ },
+ traverserGenerator: tg,
+ currentEpochReceiver: ns,
+ keyStore: ks,
+ containerSource: cs,
+ }
for i := range opts {
opts[i](c)
@@ -77,49 +94,6 @@ func New(opts ...Option) *Service {
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))}
- }
-}
-
-// WithLocalStorageEngine returns option to set local storage
-// instance.
-func WithLocalStorageEngine(e *engine.StorageEngine) Option {
- return func(c *cfg) {
- c.localStorage = &storageEngineWrapper{
- storage: e,
- }
- }
-}
-
-// WithClientConstructor returns option to set constructor of remote node clients.
-func WithClientConstructor(v ClientConstructor) Option {
- return func(c *cfg) {
- c.clientConstructor.(*clientConstructorWrapper).constructor = v
- }
-}
-
-// WithTraverserGenerator returns option to set generator of
-// placement traverser to get the objects from containers.
-func WithTraverserGenerator(t *util.TraverserGenerator) Option {
- return func(c *cfg) {
- c.traverserGenerator = (*traverseGeneratorWrapper)(t)
- }
-}
-
-// WithNetMapSource returns option to set network
-// map storage to receive current network state.
-func WithNetMapSource(nmSrc netmap.Source) Option {
- return func(c *cfg) {
- c.currentEpochReceiver = &nmSrcWrapper{
- nmSrc: nmSrc,
- }
- }
-}
-
-// WithKeyStorage returns option to set private
-// key storage for session tokens and node key.
-func WithKeyStorage(store *util.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStore = store
+ c.log = l
}
}
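
New now takes its required collaborators as explicit constructor arguments and reserves the variadic Option list for genuinely optional knobs such as the logger. A toy version of that split (all types here are stand-ins):

```go
package main

import "fmt"

// Stand-in types: required collaborators arrive as plain arguments,
// while the logger stays behind an Option because a sane default exists.
type store struct{}

type logger struct{ name string }

type cfg struct {
	store *store
	log   *logger
}

type Option func(*cfg)

func WithLogger(l *logger) Option {
	return func(c *cfg) { c.log = l }
}

func New(s *store, opts ...Option) *cfg {
	c := &cfg{
		store: s,
		log:   &logger{name: "default"},
	}
	for i := range opts {
		opts[i](c)
	}
	return c
}

func main() {
	svc := New(&store{}, WithLogger(&logger{name: "object.search"}))
	fmt.Println(svc.log.name) // object.search
}
```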
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 741a224afb..0be5345b9a 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -1,15 +1,15 @@
package searchsvc
import (
+ "context"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -33,13 +33,10 @@ type storageEngineWrapper struct {
storage *engine.StorageEngine
}
-type traverseGeneratorWrapper util.TraverserGenerator
-
-type nmSrcWrapper struct {
- nmSrc netmap.Source
-}
-
-func newUniqueAddressWriter(w IDListWriter) IDListWriter {
+func newUniqueAddressWriter(w IDListWriter) *uniqueIDWriter {
+ if w, ok := w.(*uniqueIDWriter); ok {
+ return w
+ }
return &uniqueIDWriter{
written: make(map[oid.ID]struct{}),
writer: w,
@@ -57,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
}
// exclude processed address
- list = append(list[:i], list[i+1:]...)
+ list = slices.Delete(list, i, i+1)
i--
}
@@ -77,9 +74,9 @@ func (c *clientConstructorWrapper) get(info client.NodeInfo) (searchClient, erro
}, nil
}
-func (c *clientWrapper) searchObjects(exec *execCtx, info client.NodeInfo) ([]oid.ID, error) {
+func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info client.NodeInfo) ([]oid.ID, error) {
if exec.prm.forwarder != nil {
- return exec.prm.forwarder(info, c.client)
+ return exec.prm.forwarder(ctx, info, c.client)
}
var sessionInfo *util.SessionInfo
@@ -98,7 +95,6 @@ func (c *clientWrapper) searchObjects(exec *execCtx, info client.NodeInfo) ([]oi
var prm internalclient.SearchObjectsPrm
- prm.SetContext(exec.context())
prm.SetClient(c.client)
prm.SetPrivateKey(key)
prm.SetSessionToken(exec.prm.common.SessionToken())
@@ -109,7 +105,7 @@ func (c *clientWrapper) searchObjects(exec *execCtx, info client.NodeInfo) ([]oi
prm.SetContainerID(exec.containerID())
prm.SetFilters(exec.searchFilters())
- res, err := internalclient.SearchObjects(prm)
+ res, err := internalclient.SearchObjects(ctx, prm)
if err != nil {
return nil, err
}
@@ -117,12 +113,16 @@ func (c *clientWrapper) searchObjects(exec *execCtx, info client.NodeInfo) ([]oi
return res.IDList(), nil
}
-func (e *storageEngineWrapper) search(exec *execCtx) ([]oid.ID, error) {
+func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
+ cnr, err := exec.getContainer(ctx)
+ if err != nil {
+ return nil, err
+ }
var selectPrm engine.SelectPrm
selectPrm.WithFilters(exec.searchFilters())
- selectPrm.WithContainerID(exec.containerID())
+ selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr))
- r, err := e.storage.Select(selectPrm)
+ r, err := e.storage.Select(ctx, selectPrm)
if err != nil {
return nil, err
}
@@ -139,11 +139,3 @@ func idsFromAddresses(addrs []oid.Address) []oid.ID {
return ids
}
-
-func (e *traverseGeneratorWrapper) generateTraverser(cnr cid.ID, epoch uint64) (*placement.Traverser, error) {
- return (*util.TraverserGenerator)(e).GenerateTraverser(cnr, nil, epoch)
-}
-
-func (n *nmSrcWrapper) currentEpoch() (uint64, error) {
- return n.nmSrc.Epoch()
-}
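
uniqueIDWriter, returned by newUniqueAddressWriter above, remembers every ID it has forwarded and removes repeats in place (now via slices.Delete) before calling the wrapped writer; a mutex keeps it safe under the concurrent fan-out in container.go. A stand-alone sketch with string IDs in place of oid.ID:

```go
package main

import (
	"fmt"
	"slices"
	"sync"
)

// uniqueWriter is a string-keyed sketch of uniqueIDWriter: it records
// every ID it has forwarded, strips repeats in place with slices.Delete,
// and guards its state with a mutex.
type uniqueWriter struct {
	mtx     sync.Mutex
	written map[string]struct{}
	write   func([]string) error
}

func (w *uniqueWriter) WriteIDs(list []string) error {
	w.mtx.Lock()
	for i := 0; i < len(list); i++ {
		if _, ok := w.written[list[i]]; !ok {
			w.written[list[i]] = struct{}{} // mark ID as processed
			continue
		}
		list = slices.Delete(list, i, i+1) // exclude processed ID
		i--
	}
	w.mtx.Unlock()
	return w.write(list)
}

func main() {
	w := &uniqueWriter{
		written: make(map[string]struct{}),
		write:   func(ids []string) error { fmt.Println(ids); return nil },
	}
	_ = w.WriteIDs([]string{"a", "b"})      // [a b]
	_ = w.WriteIDs([]string{"b", "c", "c"}) // [c]
}
```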
diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go
new file mode 100644
index 0000000000..7bb6e4d3c8
--- /dev/null
+++ b/pkg/services/object/search/v2/request_forwarder.go
@@ -0,0 +1,99 @@
+package searchsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type requestForwarder struct {
+ OnceResign sync.Once
+ Request *objectV2.SearchRequest
+ Key *ecdsa.PrivateKey
+}
+
+func (f *requestForwarder) forwardRequest(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) ([]oid.ID, error) {
+ var err error
+
+ // once compose and resign forwarding request
+ f.OnceResign.Do(func() {
+ // compose meta header of the local server
+ metaHdr := new(session.RequestMetaHeader)
+ metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+ // TODO: #1165 think about how to set the other fields
+ metaHdr.SetOrigin(f.Request.GetMetaHeader())
+
+ f.Request.SetMetaHeader(metaHdr)
+
+ err = signature.SignServiceMessage(f.Key, f.Request)
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ var searchStream *rpc.SearchResponseReader
+ err = c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error {
+ searchStream, err = rpc.SearchObjects(cli, f.Request, rpcclient.WithContext(ctx))
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // The code below is copy-pasted from the c.SearchObjects implementation;
+ // perhaps it is worth extracting a shared utility function in frostfs-api-go.
+ var (
+ searchResult []oid.ID
+ resp = new(objectV2.SearchResponse)
+ )
+
+ for {
+ // receive message from server stream
+ err := searchStream.Read(resp)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ return nil, fmt.Errorf("reading the response failed: %w", err)
+ }
+
+ // verify response key
+ if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
+ return nil, err
+ }
+
+ // verify response structure
+ if err := signature.VerifyServiceMessage(resp); err != nil {
+ return nil, fmt.Errorf("could not verify %T: %w", resp, err)
+ }
+
+ chunk := resp.GetBody().GetIDList()
+ var id oid.ID
+
+ for i := range chunk {
+ err = id.ReadFromV2(chunk[i])
+ if err != nil {
+ return nil, fmt.Errorf("invalid object ID: %w", err)
+ }
+
+ searchResult = append(searchResult, id)
+ }
+ }
+
+ return searchResult, nil
+}
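
requestForwarder re-signs the forwarded request exactly once via sync.Once, even though forwarding may be retried against several addresses of the same node; the Once body reports failure through a captured error. The pattern in isolation (the signing step is faked):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// forwarder isolates the sync.Once idiom above: the request is
// composed and re-signed exactly once, no matter how many addresses
// are tried, and the Once body hands its error back through a field.
type forwarder struct {
	onceResign sync.Once
	signErr    error
	signCount  int
}

func (f *forwarder) forward(addr string) error {
	f.onceResign.Do(func() {
		f.signCount++ // pretend to decrement TTL and re-sign here
		f.signErr = nil
	})
	if f.signErr != nil {
		return f.signErr
	}
	if addr == "bad" {
		return errors.New("dial failed") // per-address failure; no re-sign
	}
	return nil
}

func main() {
	f := new(forwarder)
	fmt.Println(f.forward("bad"))                 // dial failed
	fmt.Println(f.forward("good"))                // <nil>
	fmt.Println("signed", f.signCount, "time(s)") // signed 1 time(s)
}
```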
diff --git a/pkg/services/object/search/v2/service.go b/pkg/services/object/search/v2/service.go
index 17e1bc7e04..856cd9f041 100644
--- a/pkg/services/object/search/v2/service.go
+++ b/pkg/services/object/search/v2/service.go
@@ -1,36 +1,23 @@
package searchsvc
import (
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Search operation of Object service v2.
type Service struct {
- *cfg
-}
-
-// Option represents Service constructor option.
-type Option func(*cfg)
-
-type cfg struct {
- svc *searchsvc.Service
-
+ svc *searchsvc.Service
keyStorage *objutil.KeyStorage
}
// NewService constructs Service instance from provided options.
-func NewService(opts ...Option) *Service {
- c := new(cfg)
-
- for i := range opts {
- opts[i](c)
- }
-
+func NewService(s *searchsvc.Service, ks *objutil.KeyStorage) *Service {
return &Service{
- cfg: c,
+ svc: s,
+ keyStorage: ks,
}
}
@@ -43,18 +30,3 @@ func (s *Service) Search(req *objectV2.SearchRequest, stream objectSvc.SearchStr
return s.svc.Search(stream.Context(), *p)
}
-
-// WithInternalService returns option to set entity
-// that handles request payload.
-func WithInternalService(v *searchsvc.Service) Option {
- return func(c *cfg) {
- c.svc = v
- }
-}
-
-// WithKeyStorage returns option to set local private key storage.
-func WithKeyStorage(ks *objutil.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStorage = ks
- }
-}
diff --git a/pkg/services/object/search/v2/streamer.go b/pkg/services/object/search/v2/streamer.go
index 15e2d53d5a..93b2813436 100644
--- a/pkg/services/object/search/v2/streamer.go
+++ b/pkg/services/object/search/v2/streamer.go
@@ -1,9 +1,9 @@
package searchsvc
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go
index a2acb48f3b..48ae989582 100644
--- a/pkg/services/object/search/v2/util.go
+++ b/pkg/services/object/search/v2/util.go
@@ -1,24 +1,18 @@
package searchsvc
import (
+ "context"
"errors"
"fmt"
- "io"
- "sync"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -37,8 +31,6 @@ func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStre
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -52,95 +44,27 @@ func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStre
})
if !commonPrm.LocalOnly() {
- var onceResign sync.Once
-
key, err := s.keyStorage.GetKey(nil)
if err != nil {
return nil, err
}
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) ([]oid.ID, error) {
- var err error
+ forwarder := &requestForwarder{
+ Request: req,
+ Key: key,
+ }
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- var searchStream *rpc.SearchResponseReader
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- searchStream, err = rpc.SearchObjects(cli, req, rpcclient.WithContext(stream.Context()))
- return err
- })
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.SearchObjects implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
- var (
- searchResult []oid.ID
- resp = new(objectV2.SearchResponse)
- )
-
- for {
- // receive message from server stream
- err := searchStream.Read(resp)
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return nil, fmt.Errorf("reading the response failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(resp); err != nil {
- return nil, fmt.Errorf("could not verify %T: %w", resp, err)
- }
-
- chunk := resp.GetBody().GetIDList()
- var id oid.ID
-
- for i := range chunk {
- err = id.ReadFromV2(chunk[i])
- if err != nil {
- return nil, fmt.Errorf("invalid object ID: %w", err)
- }
-
- searchResult = append(searchResult, id)
- }
- }
-
- return searchResult, nil
- }))
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequest))
}
p.WithContainerID(id)
- p.WithSearchFilters(object.NewSearchFiltersFromV2(body.GetFilters()))
+ p.WithSearchFilters(objectSDK.NewSearchFiltersFromV2(body.GetFilters()))
return p, nil
}
-func groupAddressRequestForwarder(f func(network.Address, client.MultiAddressClient, []byte) ([]oid.ID, error)) searchsvc.RequestForwarder {
- return func(info client.NodeInfo, c client.MultiAddressClient) ([]oid.ID, error) {
+func groupAddressRequestForwarder(f func(context.Context, network.Address, client.MultiAddressClient, []byte) ([]oid.ID, error)) searchsvc.RequestForwarder {
+ return func(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) ([]oid.ID, error) {
var (
firstErr error
res []oid.ID
@@ -161,7 +85,7 @@ func groupAddressRequestForwarder(f func(network.Address, client.MultiAddressCli
// would be nice to log otherwise
}()
- res, err = f(addr, c, key)
+ res, err = f(ctx, addr, c, key)
return
})
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index d95c6c906e..e652939770 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -3,8 +3,8 @@ package object
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// GetObjectStream is an interface of FrostFS API v2 compatible object streamer.
@@ -27,8 +27,14 @@ type SearchStream interface {
// PutObjectStream is an interface of FrostFS API v2 compatible client's object streamer.
type PutObjectStream interface {
- Send(*object.PutRequest) error
- CloseAndRecv() (*object.PutResponse, error)
+ Send(context.Context, *object.PutRequest) error
+ CloseAndRecv(context.Context) (*object.PutResponse, error)
+}
+
+// PatchObjectStream is an interface of FrostFS API v2 compatible patch streamer.
+type PatchObjectStream interface {
+ Send(context.Context, *object.PatchRequest) error
+ CloseAndRecv(context.Context) (*object.PatchResponse, error)
}
// ServiceServer is an interface of utility
@@ -36,9 +42,11 @@ type PutObjectStream interface {
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
Put(context.Context) (PutObjectStream, error)
+ Patch(context.Context) (PatchObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
GetRange(*object.GetRangeRequest, GetObjectRangeStream) error
GetRangeHash(context.Context, *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error)
+ PutSingle(context.Context, *object.PutSingleRequest) (*object.PutSingleResponse, error)
}
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 585fc659ad..fd8e926ddc 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -5,82 +5,103 @@ import (
"crypto/ecdsa"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type SignService struct {
- key *ecdsa.PrivateKey
-
sigSvc *util.SignService
svc ServiceServer
}
type searchStreamSigner struct {
- util.ServerStream
-
- respWriter util.ResponseMessageWriter
+ SearchStream
+ sigSvc *util.SignService
nonEmptyResp bool // set on first Send call
}
type getStreamSigner struct {
- util.ServerStream
-
- respWriter util.ResponseMessageWriter
+ GetObjectStream
+ sigSvc *util.SignService
}
type putStreamSigner struct {
- stream *util.RequestMessageStreamer
+ sigSvc *util.SignService
+ stream PutObjectStream
+ err error
+}
+
+type patchStreamSigner struct {
+ sigSvc *util.SignService
+ stream PatchObjectStream
+ err error
}
type getRangeStreamSigner struct {
- util.ServerStream
-
- respWriter util.ResponseMessageWriter
+ GetObjectRangeStream
+ sigSvc *util.SignService
}
func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService {
return &SignService{
- key: key,
sigSvc: util.NewUnarySignService(key),
svc: svc,
}
}
func (s *getStreamSigner) Send(resp *object.GetResponse) error {
- return s.respWriter(resp)
+ return s.send(resp, nil)
+}
+
+func (s *getStreamSigner) send(resp *object.GetResponse, err error) error {
+ if err := s.sigSvc.SignResponse(resp, err); err != nil {
+ return err
+ }
+ return s.GetObjectStream.Send(resp)
}
func (s *SignService) Get(req *object.GetRequest, stream GetObjectStream) error {
- return s.sigSvc.HandleServerStreamRequest(req,
- func(resp util.ResponseMessage) error {
- return stream.Send(resp.(*object.GetResponse))
- },
- func() util.ResponseMessage {
- return new(object.GetResponse)
- },
- func(respWriter util.ResponseMessageWriter) error {
- return s.svc.Get(req, &getStreamSigner{
- ServerStream: stream,
- respWriter: respWriter,
- })
- },
- )
-}
-
-func (s *putStreamSigner) Send(req *object.PutRequest) error {
- return s.stream.Send(req)
-}
-
-func (s *putStreamSigner) CloseAndRecv() (*object.PutResponse, error) {
- r, err := s.stream.CloseAndRecv()
- if err != nil {
- return nil, fmt.Errorf("could not receive response: %w", err)
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(object.GetResponse)
+ _ = s.sigSvc.SignResponse(resp, err)
+ return stream.Send(resp)
}
- return r.(*object.PutResponse), nil
+ w := &getStreamSigner{
+ GetObjectStream: stream,
+ sigSvc: s.sigSvc,
+ }
+ if err := s.svc.Get(req, w); err != nil {
+ return w.send(new(object.GetResponse), err)
+ }
+ return nil
+}
+
+func (s *putStreamSigner) Send(ctx context.Context, req *object.PutRequest) error {
+ if s.err = s.sigSvc.VerifyRequest(req); s.err != nil {
+ return util.ErrAbortStream
+ }
+ if s.err = s.stream.Send(ctx, req); s.err != nil {
+ return util.ErrAbortStream
+ }
+ return nil
+}
+
+func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutResponse, err error) {
+ if s.err != nil {
+ err = s.err
+ resp = new(object.PutResponse)
+ } else {
+ resp, err = s.stream.CloseAndRecv(ctx)
+ if err != nil {
+ err = fmt.Errorf("could not close stream and receive response: %w", err)
+ resp = new(object.PutResponse)
+ }
+ }
+
+ return resp, s.sigSvc.SignResponse(resp, err)
}
func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
@@ -90,120 +111,144 @@ func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
}
return &putStreamSigner{
- stream: s.sigSvc.CreateRequestStreamer(
- func(req any) error {
- return stream.Send(req.(*object.PutRequest))
- },
- func() (util.ResponseMessage, error) {
- return stream.CloseAndRecv()
- },
- func() util.ResponseMessage {
- return new(object.PutResponse)
- },
- ),
+ stream: stream,
+ sigSvc: s.sigSvc,
+ }, nil
+}
+
+func (s *patchStreamSigner) Send(ctx context.Context, req *object.PatchRequest) error {
+ if s.err = s.sigSvc.VerifyRequest(req); s.err != nil {
+ return util.ErrAbortStream
+ }
+ if s.err = s.stream.Send(ctx, req); s.err != nil {
+ return util.ErrAbortStream
+ }
+ return nil
+}
+
+func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PatchResponse, err error) {
+ if s.err != nil {
+ err = s.err
+ resp = new(object.PatchResponse)
+ } else {
+ resp, err = s.stream.CloseAndRecv(ctx)
+ if err != nil {
+ err = fmt.Errorf("could not close stream and receive response: %w", err)
+ resp = new(object.PatchResponse)
+ }
+ }
+
+ return resp, s.sigSvc.SignResponse(resp, err)
+}
+
+func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
+ stream, err := s.svc.Patch(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
+ }
+
+ return &patchStreamSigner{
+ stream: stream,
+ sigSvc: s.sigSvc,
}, nil
}
func (s *SignService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Head(ctx, req.(*object.HeadRequest))
- },
- func() util.ResponseMessage {
- return new(object.HeadResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(object.HeadResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
+ resp, err := util.EnsureNonNilResponse(s.svc.Head(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
+}
- return resp.(*object.HeadResponse), nil
+func (s *SignService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ req.GetBody().SetMarshalData(req.GetBody().StableMarshal(nil))
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(object.PutSingleResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
+ }
+ resp, err := util.EnsureNonNilResponse(s.svc.PutSingle(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
func (s *searchStreamSigner) Send(resp *object.SearchResponse) error {
s.nonEmptyResp = true
- return s.respWriter(resp)
+ return s.send(resp, nil)
+}
+
+func (s *searchStreamSigner) send(resp *object.SearchResponse, err error) error {
+ if err := s.sigSvc.SignResponse(resp, err); err != nil {
+ return err
+ }
+ return s.SearchStream.Send(resp)
}
func (s *SignService) Search(req *object.SearchRequest, stream SearchStream) error {
- return s.sigSvc.HandleServerStreamRequest(req,
- func(resp util.ResponseMessage) error {
- return stream.Send(resp.(*object.SearchResponse))
- },
- func() util.ResponseMessage {
- return new(object.SearchResponse)
- },
- func(respWriter util.ResponseMessageWriter) error {
- stream := &searchStreamSigner{
- ServerStream: stream,
- respWriter: respWriter,
- }
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(object.SearchResponse)
+ _ = s.sigSvc.SignResponse(resp, err)
+ return stream.Send(resp)
+ }
- err := s.svc.Search(req, stream)
-
- if err == nil && !stream.nonEmptyResp {
- // The higher component does not write any response in the case of an empty result (which is correct).
- // With the introduction of status returns at least one answer must be signed and sent to the client.
- // This approach is supported by clients who do not know how to work with statuses (one could make
- // a switch according to the protocol version from the request, but the costs of sending an empty
- // answer can be neglected due to the gradual refusal to use the "old" clients).
- return stream.Send(new(object.SearchResponse))
- }
-
- return err
- },
- )
+ ss := &searchStreamSigner{
+ SearchStream: stream,
+ sigSvc: s.sigSvc,
+ }
+ err := s.svc.Search(req, ss)
+ if err != nil || !ss.nonEmptyResp {
+ // The higher component does not write any response in the case of an empty result (which is correct).
+ // With the introduction of status returns, at least one answer must be signed and sent to the client.
+ // This also supports clients that cannot work with statuses (one could switch on the protocol
+ // version from the request, but the cost of sending an empty answer is negligible given the
+ // gradual phase-out of the "old" clients).
+ return ss.send(new(object.SearchResponse), err)
+ }
+ return nil
}
func (s *SignService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Delete(ctx, req.(*object.DeleteRequest))
- },
- func() util.ResponseMessage {
- return new(object.DeleteResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(object.DeleteResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*object.DeleteResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.Delete(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
func (s *getRangeStreamSigner) Send(resp *object.GetRangeResponse) error {
- return s.respWriter(resp)
+ return s.send(resp, nil)
+}
+
+func (s *getRangeStreamSigner) send(resp *object.GetRangeResponse, err error) error {
+ if err := s.sigSvc.SignResponse(resp, err); err != nil {
+ return err
+ }
+ return s.GetObjectRangeStream.Send(resp)
}
func (s *SignService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error {
- return s.sigSvc.HandleServerStreamRequest(req,
- func(resp util.ResponseMessage) error {
- return stream.Send(resp.(*object.GetRangeResponse))
- },
- func() util.ResponseMessage {
- return new(object.GetRangeResponse)
- },
- func(respWriter util.ResponseMessageWriter) error {
- return s.svc.GetRange(req, &getRangeStreamSigner{
- ServerStream: stream,
- respWriter: respWriter,
- })
- },
- )
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(object.GetRangeResponse)
+ _ = s.sigSvc.SignResponse(resp, err)
+ return stream.Send(resp)
+ }
+
+ w := &getRangeStreamSigner{
+ GetObjectRangeStream: stream,
+ sigSvc: s.sigSvc,
+ }
+ if err := s.svc.GetRange(req, w); err != nil {
+ return w.send(new(object.GetRangeResponse), err)
+ }
+ return nil
}
func (s *SignService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.GetRangeHash(ctx, req.(*object.GetRangeHashRequest))
- },
- func() util.ResponseMessage {
- return new(object.GetRangeHashResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(object.GetRangeHashResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*object.GetRangeHashResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.GetRangeHash(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
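
Every unary handler in `sign.go` now follows the same three steps: verify the request, call the wrapped service (ensuring a non-nil response), and sign the result even on error. A generic sketch of that shape, with hypothetical parameter names standing in for `sigSvc.VerifyRequest`, the inner service call, and `sigSvc.SignResponse` (the real handlers inline this per request type):

```go
package object

import "context"

// signedUnary is a hypothetical generic helper illustrating the verify-then-sign
// pattern used by Head, Delete, GetRangeHash and PutSingle above.
func signedUnary[Req, Resp any](
	ctx context.Context, req Req,
	verify func(Req) error,
	handle func(context.Context, Req) (Resp, error),
	newResp func() Resp,
	sign func(Resp, error) error,
) (Resp, error) {
	if err := verify(req); err != nil {
		resp := newResp() // verification failures still get a signed response
		return resp, sign(resp, err)
	}
	resp, err := handle(ctx, req)
	return resp, sign(resp, err) // the status (err) is embedded and signed
}
```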
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index 3836103ded..b446d36056 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -4,8 +4,8 @@ import (
"bytes"
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -91,6 +91,10 @@ func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
return c.next.Put(ctx)
}
+func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) {
+ return c.next.Patch(ctx)
+}
+
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
return c.next.Head(ctx, request)
}
@@ -107,6 +111,10 @@ func (c TransportSplitter) Delete(ctx context.Context, request *object.DeleteReq
return c.next.Delete(ctx, request)
}
+func (c TransportSplitter) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+ return c.next.PutSingle(ctx, req)
+}
+
func (s *rangeStreamMsgSizeCtrl) Send(resp *object.GetRangeResponse) error {
body := resp.GetBody()
@@ -154,16 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
var newResp *object.SearchResponse
- for ln := uint64(len(ids)); ; {
+ for {
if newResp == nil {
newResp = new(object.SearchResponse)
newResp.SetBody(body)
}
- cut := s.addrAmount
- if cut > ln {
- cut = ln
- }
+ cut := min(s.addrAmount, uint64(len(ids)))
body.SetIDList(ids[:cut])
newResp.SetMetaHeader(resp.GetMetaHeader())
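
The splitter's chunking loop now relies on the `min` builtin (Go 1.21+) instead of the manual comparison. A standalone sketch of the same slicing pattern:

```go
package main

import "fmt"

// chunk splits items into pieces of at most n elements, using the same
// min-based cut the splitter applies to ID lists.
func chunk[T any](items []T, n int) [][]T {
	var out [][]T
	for len(items) > 0 {
		cut := min(n, len(items))
		out = append(out, items[:cut])
		items = items[cut:]
	}
	return out
}

func main() {
	fmt.Println(chunk([]int{1, 2, 3, 4, 5}, 2)) // [[1 2] [3 4] [5]]
}
```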
diff --git a/pkg/services/object/util/chain.go b/pkg/services/object/util/chain.go
index 33bcd6c115..b574d5eb60 100644
--- a/pkg/services/object/util/chain.go
+++ b/pkg/services/object/util/chain.go
@@ -4,7 +4,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -21,11 +22,11 @@ type HeadReceiver interface {
//
// If reverseDirection arg is true, then the traversal is done in reverse order.
// Stop boolean result provides the ability to interrupt the traversal.
-type SplitMemberHandler func(member *object.Object, reverseDirection bool) (stop bool)
+type SplitMemberHandler func(member *objectSDK.Object, reverseDirection bool) (stop bool)
// IterateAllSplitLeaves is an iterator over all object split-tree leaves in direct order.
-func IterateAllSplitLeaves(r HeadReceiver, addr oid.Address, h func(*object.Object)) error {
- return IterateSplitLeaves(r, addr, func(leaf *object.Object) bool {
+func IterateAllSplitLeaves(r HeadReceiver, addr oid.Address, h func(*objectSDK.Object)) error {
+ return IterateSplitLeaves(r, addr, func(leaf *objectSDK.Object) bool {
h(leaf)
return false
})
@@ -34,13 +35,13 @@ func IterateAllSplitLeaves(r HeadReceiver, addr oid.Address, h func(*object.Obje
// IterateSplitLeaves is an iterator over object split-tree leaves in direct order.
//
// If member handler returns true, then the iterator aborts without error.
-func IterateSplitLeaves(r HeadReceiver, addr oid.Address, h func(*object.Object) bool) error {
+func IterateSplitLeaves(r HeadReceiver, addr oid.Address, h func(*objectSDK.Object) bool) error {
var (
reverse bool
- leaves []*object.Object
+ leaves []*objectSDK.Object
)
- if err := TraverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
+ if err := TraverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) {
reverse = reverseDirection
if reverse {
@@ -83,9 +84,9 @@ func traverseSplitChain(r HeadReceiver, addr oid.Address, h SplitMemberHandler)
switch res := v.(type) {
default:
panic(fmt.Sprintf("unexpected result of %T: %T", r, v))
- case *object.Object:
+ case *objectSDK.Object:
return h(res, false), nil
- case *object.SplitInfo:
+ case *objectSDK.SplitInfo:
link, withLink := res.Link()
last, withLast := res.LastPart()
@@ -93,80 +94,89 @@ func traverseSplitChain(r HeadReceiver, addr oid.Address, h SplitMemberHandler)
default:
return false, errors.New("lack of split information")
case withLink:
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(link)
-
- chain := make([]oid.ID, 0)
-
- if _, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
- children := member.Children()
-
- if reverseDirection {
- chain = append(children, chain...)
- } else {
- chain = append(chain, children...)
- }
-
- return false
- }); err != nil {
- return false, err
- }
-
- var reverseChain []*object.Object
-
- for i := range chain {
- addr.SetObject(chain[i])
-
- if stop, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
- if !reverseDirection {
- return h(member, false)
- }
-
- reverseChain = append(reverseChain, member)
- return false
- }); err != nil || stop {
- return stop, err
- }
- }
-
- for i := len(reverseChain) - 1; i >= 0; i-- {
- if h(reverseChain[i], false) {
- return true, nil
- }
- }
+ return traverseByLink(cnr, link, r, h)
case withLast:
- var addr oid.Address
- addr.SetContainer(cnr)
+ return traverseByLast(cnr, last, withLast, res, r, h)
+ }
+ }
+}
- for last, withLast = res.LastPart(); withLast; {
- addr.SetObject(last)
+func traverseByLink(cnr cid.ID, link oid.ID, r HeadReceiver, h SplitMemberHandler) (bool, error) {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(link)
- var directChain []*object.Object
+ chain := make([]oid.ID, 0)
- if _, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
- if reverseDirection {
- last, withLast = member.PreviousID()
- return h(member, true)
- }
+ if _, err := traverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) {
+ children := member.Children()
- directChain = append(directChain, member)
+ if reverseDirection {
+ chain = append(children, chain...)
+ } else {
+ chain = append(chain, children...)
+ }
- return false
- }); err != nil {
- return false, err
- }
+ return false
+ }); err != nil {
+ return false, err
+ }
- for i := len(directChain) - 1; i >= 0; i-- {
- if h(directChain[i], true) {
- return true, nil
- }
- }
+ var reverseChain []*objectSDK.Object
- if len(directChain) > 0 {
- last, withLast = directChain[len(directChain)-1].PreviousID()
- }
+ for i := range chain {
+ addr.SetObject(chain[i])
+
+ if stop, err := traverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) {
+ if !reverseDirection {
+ return h(member, false)
}
+
+ reverseChain = append(reverseChain, member)
+ return false
+ }); err != nil || stop {
+ return stop, err
+ }
+ }
+
+ for i := len(reverseChain) - 1; i >= 0; i-- {
+ if h(reverseChain[i], false) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func traverseByLast(cnr cid.ID, last oid.ID, withLast bool, res *objectSDK.SplitInfo, r HeadReceiver, h SplitMemberHandler) (bool, error) {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+
+ for last, withLast = res.LastPart(); withLast; {
+ addr.SetObject(last)
+
+ var directChain []*objectSDK.Object
+
+ if _, err := traverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) {
+ if reverseDirection {
+ last, withLast = member.PreviousID()
+ return h(member, true)
+ }
+
+ directChain = append(directChain, member)
+
+ return false
+ }); err != nil {
+ return false, err
+ }
+
+ for i := len(directChain) - 1; i >= 0; i-- {
+ if h(directChain[i], true) {
+ return true, nil
+ }
+ }
+
+ if len(directChain) > 0 {
+ last, withLast = directChain[len(directChain)-1].PreviousID()
}
}
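
Both extracted helpers preserve the invariant of the original inline code: members encountered while the receiver walks in reverse are buffered and replayed backwards, so the handler always observes the split chain in direct order. A compact, self-contained model of that buffering rule (the function and its shape are illustrative, not part of the patch):

```go
package chainmodel

// emitDirect is a simplified model: walk reports members and whether they
// arrived in reverse order; h always receives them in direct order.
func emitDirect[T any](walk func(visit func(member T, reverse bool)), h func(T) bool) bool {
	var buffered []T
	stopped := false
	walk(func(member T, reverse bool) {
		if stopped {
			return
		}
		if reverse {
			buffered = append(buffered, member) // replayed later, backwards
			return
		}
		stopped = h(member)
	})
	for i := len(buffered) - 1; i >= 0 && !stopped; i-- {
		stopped = h(buffered[i])
	}
	return stopped
}
```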
diff --git a/pkg/services/object/util/key.go b/pkg/services/object/util/key.go
index e2ece58493..23d6c1c68d 100644
--- a/pkg/services/object/util/key.go
+++ b/pkg/services/object/util/key.go
@@ -67,17 +67,13 @@ func (s *KeyStorage) GetKey(info *SessionInfo) (*ecdsa.PrivateKey, error) {
pToken := s.tokenStore.Get(info.Owner, binID)
if pToken != nil {
- if pToken.ExpiredAt() <= s.networkState.CurrentEpoch() {
- var errExpired apistatus.SessionTokenExpired
-
- return nil, errExpired
+ if pToken.ExpiredAt() < s.networkState.CurrentEpoch() {
+ return nil, new(apistatus.SessionTokenExpired)
}
return pToken.SessionKey(), nil
}
- var errNotFound apistatus.SessionTokenNotFound
-
- return nil, errNotFound
+ return nil, new(apistatus.SessionTokenNotFound)
}
return s.key, nil
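
Note the semantic change hidden in the comparison: with `<` instead of `<=`, a session token whose `ExpiredAt()` equals the current epoch is still accepted; only strictly older tokens fail. Reduced to a predicate (standalone sketch, names assumed):

```go
// tokenValid mirrors the new check: the token remains usable throughout the
// epoch it expires at.
func tokenValid(expiredAt, currentEpoch uint64) bool {
	return expiredAt >= currentEpoch // previously: expiredAt > currentEpoch
}
```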
diff --git a/pkg/services/object/util/key_test.go b/pkg/services/object/util/key_test.go
index 76e320e0c0..1753a26f75 100644
--- a/pkg/services/object/util/key_test.go
+++ b/pkg/services/object/util/key_test.go
@@ -5,10 +5,10 @@ import (
"crypto/elliptic"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
tokenStorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -25,7 +25,7 @@ func TestNewKeyStorage(t *testing.T) {
tokenStor := tokenStorage.NewTokenStore()
stor := util.NewKeyStorage(&nodeKey.PrivateKey, tokenStor, mockedNetworkState{42})
- owner := *usertest.ID()
+ owner := usertest.ID()
t.Run("node key", func(t *testing.T) {
key, err := stor.GetKey(nil)
@@ -36,7 +36,7 @@ func TestNewKeyStorage(t *testing.T) {
t.Run("unknown token", func(t *testing.T) {
_, err = stor.GetKey(&util.SessionInfo{
ID: uuid.New(),
- Owner: *usertest.ID(),
+ Owner: usertest.ID(),
})
require.Error(t, err)
})
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index beda45c0c7..b108262261 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,24 +1,19 @@
package util
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
// LogServiceError writes error message of object service to provided logger.
-func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
- l.Error("object service error",
+func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) {
+ l.Error(ctx, logs.UtilObjectServiceError,
zap.String("node", network.StringifyGroup(node)),
zap.String("request", req),
- zap.String("error", err.Error()),
- )
-}
-
-// LogWorkerPoolError writes debug error message of object worker pool to provided logger.
-func LogWorkerPoolError(l *logger.Logger, req string, err error) {
- l.Error("could not push task to worker pool",
- zap.String("request", req),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
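
`zap.Error(err)` replaces the hand-rolled `zap.String("error", err.Error())`: it uses the canonical `error` key, handles a `nil` error safely, and keeps wrapped-error details for structured sinks. A minimal standalone comparison:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	err := errors.New("connection refused")
	log.Error("object service error",
		zap.String("request", "PUT"),
		zap.Error(err), // emits {"error": "connection refused"}, nil-safe
	)
}
```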
diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go
index 6cd3856f47..f74b0aab9a 100644
--- a/pkg/services/object/util/placement.go
+++ b/pkg/services/object/util/placement.go
@@ -1,7 +1,9 @@
package util
import (
+ "context"
"fmt"
+ "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -43,8 +45,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu
}
}
-func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(cnr, obj, policy)
+func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -76,8 +78,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac
}
}
-func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(cnr, obj, policy)
+func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -92,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapS
}
if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
- vs[i] = append(vs[i][:j], vs[i][j+1:]...)
+ vs[i] = slices.Delete(vs[i], j, j+1)
j--
}
}
@@ -122,17 +124,17 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav
// GenerateTraverser generates placement Traverser for provided object address
// using epoch-th network map.
-func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, error) {
+func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
// get network map by epoch
- nm, err := g.netMapSrc.GetNetMapByEpoch(epoch)
+ nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch)
if err != nil {
- return nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
+ return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
}
// get container related container
- cnr, err := g.cnrSrc.Get(idCnr)
+ cnr, err := g.cnrSrc.Get(ctx, idCnr)
if err != nil {
- return nil, fmt.Errorf("could not get container: %w", err)
+ return nil, nil, fmt.Errorf("could not get container: %w", err)
}
// allocate placement traverser options
@@ -160,5 +162,9 @@ func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoc
)
}
- return placement.NewTraverser(traverseOpts...)
+ t, err := placement.NewTraverser(ctx, traverseOpts...)
+ if err != nil {
+ return nil, nil, err
+ }
+ return t, cnr, nil
}
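
The in-place filtering of local keys now uses `slices.Delete` with the usual `j--` step-back so the element shifted into position `j` is not skipped. The same loop shape, extracted into a generic standalone helper:

```go
package main

import (
	"fmt"
	"slices"
)

// dropInPlace removes every element matching drop, mirroring the loop used by
// remotePlacement (including the index step-back after each deletion).
func dropInPlace[T any](v []T, drop func(T) bool) []T {
	for j := 0; j < len(v); j++ {
		if drop(v[j]) {
			v = slices.Delete(v, j, j+1)
			j-- // re-examine the element shifted into slot j
		}
	}
	return v
}

func main() {
	fmt.Println(dropInPlace([]int{1, 2, 2, 3}, func(x int) bool { return x == 2 })) // [1 3]
}
```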
diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go
index c09c07cc1a..34d8ec7047 100644
--- a/pkg/services/object/util/prm.go
+++ b/pkg/services/object/util/prm.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strconv"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
@@ -100,16 +100,24 @@ func (p *CommonPrm) SetNetmapLookupDepth(v uint64) {
// ForgetTokens forgets all the tokens read from the request's
// meta information before.
-func (p *CommonPrm) ForgetTokens() {
+func (p *CommonPrm) ForgetTokens() func() {
if p != nil {
+ tk := p.token
+ br := p.bearer
p.token = nil
p.bearer = nil
+ return func() {
+ p.token = tk
+ p.bearer = br
+ }
}
+ return func() {}
}
func CommonPrmFromV2(req interface {
GetMetaHeader() *session.RequestMetaHeader
-}) (*CommonPrm, error) {
+},
+) (*CommonPrm, error) {
meta := req.GetMetaHeader()
ttl := meta.GetTTL()
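
`ForgetTokens` now hands back a restore closure, so a caller can strip the session and bearer tokens for a nested local call and reinstate them afterwards. A hedged sketch of the intended call-site shape (the helper name is hypothetical):

```go
package util

// withTokensForgotten runs fn with the request tokens cleared, restoring them
// on return even if fn fails.
func withTokensForgotten(p *CommonPrm, fn func() error) error {
	restore := p.ForgetTokens() // nil-safe: a nil receiver yields a no-op restore
	defer restore()
	return fn()
}
```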
diff --git a/pkg/services/object_manager/placement/cache.go b/pkg/services/object_manager/placement/cache.go
new file mode 100644
index 0000000000..2a8460ca59
--- /dev/null
+++ b/pkg/services/object_manager/placement/cache.go
@@ -0,0 +1,78 @@
+package placement
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "slices"
+ "sync"
+
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/hashicorp/golang-lru/v2/simplelru"
+)
+
+// ContainerNodesCache caches results of ContainerNodes() invocation between epochs.
+type ContainerNodesCache struct {
+ // mtx protects lastEpoch and containerCache fields.
+ mtx sync.Mutex
+ // lastEpoch contains network map epoch for all values in the container cache.
+ lastEpoch uint64
+ // containerCache caches container nodes by ID. It is used to skip `ContainerNodes` invocation if
+ // neither netmap nor container has changed.
+ containerCache simplelru.LRUCache[cid.ID, [][]netmapSDK.NodeInfo]
+}
+
+// defaultContainerCacheSize is the default size for the container cache.
+const defaultContainerCacheSize = 10
+
+// NewContainerNodesCache creates new cache which saves the result of the ContainerNodes() invocations.
+// If size is <= 0, defaultContainerCacheSize (10) is used.
+func NewContainerNodesCache(size int) *ContainerNodesCache {
+ if size <= 0 {
+ size = defaultContainerCacheSize
+ }
+
+ cache, _ := simplelru.NewLRU[cid.ID, [][]netmapSDK.NodeInfo](size, nil) // no error
+ return &ContainerNodesCache{
+ containerCache: cache,
+ }
+}
+
+// ContainerNodes returns the result of nm.ContainerNodes(), possibly from the cache.
+func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ c.mtx.Lock()
+ if nm.Epoch() == c.lastEpoch {
+ raw, ok := c.containerCache.Get(cnr)
+ c.mtx.Unlock()
+ if ok {
+ return c.cloneResult(raw), nil
+ }
+ } else {
+ c.lastEpoch = nm.Epoch()
+ c.containerCache.Purge()
+ c.mtx.Unlock()
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ cnr.Encode(binCnr)
+
+ cn, err := nm.ContainerNodes(p, binCnr)
+ if err != nil {
+ return nil, fmt.Errorf("could not get container nodes: %w", err)
+ }
+
+ c.mtx.Lock()
+ if c.lastEpoch == nm.Epoch() {
+ c.containerCache.Add(cnr, cn)
+ }
+ c.mtx.Unlock()
+ return c.cloneResult(cn), nil
+}
+
+func (c *ContainerNodesCache) cloneResult(nodes [][]netmapSDK.NodeInfo) [][]netmapSDK.NodeInfo {
+ result := make([][]netmapSDK.NodeInfo, len(nodes))
+ for repIdx := range nodes {
+ result[repIdx] = slices.Clone(nodes[repIdx])
+ }
+ return result
+}
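
The cache returns per-replica-vector clones (`slices.Clone`), so callers may reorder or trim the result without corrupting cached entries, and a new epoch purges everything. A small usage sketch (wiring and names assumed):

```go
package placementexample

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

func placeWithCache(c *placement.ContainerNodesCache, nm *netmapSDK.NetMap,
	cnr cid.ID, pp netmapSDK.PlacementPolicy,
) ([][]netmapSDK.NodeInfo, error) {
	// Served from the LRU when both the epoch and the container match;
	// otherwise nm.ContainerNodes is recomputed and stored.
	vectors, err := c.ContainerNodes(nm, cnr, pp)
	if err != nil {
		return nil, err
	}
	// vectors is cloned per replica vector, so mutating it here
	// cannot poison the cached copy.
	return vectors, nil
}
```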
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
new file mode 100644
index 0000000000..7242970b5a
--- /dev/null
+++ b/pkg/services/object_manager/placement/cache_test.go
@@ -0,0 +1,97 @@
+package placement_test
+
+import (
+ "strconv"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/stretchr/testify/require"
+)
+
+func TestContainerNodesCache(t *testing.T) {
+ const size = 3
+
+ nodes := [6]netmapSDK.NodeInfo{}
+ for i := range nodes {
+ nodes[i].SetAttribute("ATTR", strconv.Itoa(i))
+ }
+
+ nm := func(epoch uint64, nodes []netmapSDK.NodeInfo) *netmapSDK.NetMap {
+ var nm netmapSDK.NetMap
+ nm.SetEpoch(epoch)
+ nm.SetNodes(nodes)
+ return &nm
+ }
+
+ var pp netmapSDK.PlacementPolicy
+ require.NoError(t, pp.DecodeString("REP 1"))
+
+ t.Run("update netmap on the new epoch", func(t *testing.T) {
+ c := placement.NewContainerNodesCache(size)
+
+ cnr := cidtest.ID()
+ res, err := c.ContainerNodes(nm(1, nodes[0:1]), cnr, pp)
+ require.NoError(t, err)
+
+ // Use other nodes in the argument to ensure the result is taken from cache.
+ resCached, err := c.ContainerNodes(nm(1, nodes[1:2]), cnr, pp)
+ require.NoError(t, err)
+ require.Equal(t, res, resCached)
+
+ // Update epoch, netmap should be purged.
+ resCached, err = c.ContainerNodes(nm(2, nodes[2:3]), cnr, pp)
+ require.NoError(t, err)
+ require.NotEqual(t, res, resCached)
+ })
+ t.Run("cache uses container as a key", func(t *testing.T) {
+ c := placement.NewContainerNodesCache(size)
+
+ res1, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
+ require.NoError(t, err)
+
+ res2, err := c.ContainerNodes(nm(1, nodes[1:2]), cidtest.ID(), pp)
+ require.NoError(t, err)
+
+ require.NotEqual(t, res1, res2)
+ })
+ t.Run("cache respects size parameter", func(t *testing.T) {
+ c := placement.NewContainerNodesCache(size)
+
+ nm1 := nm(1, nodes[0:1])
+ nm2 := nm(1, nodes[1:2])
+ cnr := [size * 2]cid.ID{}
+ res := [size * 2][][]netmapSDK.NodeInfo{}
+ for i := range size * 2 {
+ cnr[i] = cidtest.ID()
+
+ var err error
+ res[i], err = c.ContainerNodes(nm1, cnr[i], pp)
+ require.NoError(t, err)
+ }
+
+ for i := size; i < size*2; i++ {
+ r, err := c.ContainerNodes(nm2, cnr[i], pp)
+ require.NoError(t, err)
+ require.Equal(t, res[i], r)
+ }
+ for i := range size {
+ r, err := c.ContainerNodes(nm2, cnr[i], pp)
+ require.NoError(t, err)
+ require.NotEqual(t, res[i], r)
+ }
+ })
+ t.Run("the error is propagated", func(t *testing.T) {
+ var pp netmapSDK.PlacementPolicy
+ r := netmapSDK.ReplicaDescriptor{}
+ r.SetNumberOfObjects(1)
+ r.SetSelectorName("Missing")
+ pp.AddReplicas(r)
+
+ c := placement.NewContainerNodesCache(size)
+ _, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
+ require.Error(t, err)
+ })
+}
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go
new file mode 100644
index 0000000000..0f24a9d96c
--- /dev/null
+++ b/pkg/services/object_manager/placement/metrics.go
@@ -0,0 +1,185 @@
+package placement
+
+import (
+ "errors"
+ "fmt"
+ "maps"
+ "math"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+const (
+ attrPrefix = "$attribute:"
+
+ geoDistance = "$geoDistance"
+)
+
+type Metric interface {
+ CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int
+}
+
+type metricsParser struct {
+ locodeDBPath string
+ locodes map[string]locodedb.Point
+}
+
+type MetricParser interface {
+ ParseMetrics([]string) ([]Metric, error)
+}
+
+func NewMetricsParser(locodeDBPath string) (MetricParser, error) {
+ return &metricsParser{
+ locodeDBPath: locodeDBPath,
+ }, nil
+}
+
+func (p *metricsParser) initLocodes() error {
+ if len(p.locodes) != 0 {
+ return nil
+ }
+ if len(p.locodeDBPath) > 0 {
+ p.locodes = make(map[string]locodedb.Point)
+ locodeDB := locodebolt.New(locodebolt.Prm{
+ Path: p.locodeDBPath,
+ },
+ locodebolt.ReadOnly(),
+ )
+ err := locodeDB.Open()
+ if err != nil {
+ return err
+ }
+ defer locodeDB.Close()
+ err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) {
+ p.locodes[k] = v
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ return errors.New("set path to locode database")
+}
+
+func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) {
+ var metrics []Metric
+ for _, raw := range priority {
+ if attr, found := strings.CutPrefix(raw, attrPrefix); found {
+ metrics = append(metrics, NewAttributeMetric(attr))
+ } else if raw == geoDistance {
+ err := p.initLocodes()
+ if err != nil {
+ return nil, err
+ }
+ if len(p.locodes) == 0 {
+ return nil, fmt.Errorf("provide locodes database for metric %s", raw)
+ }
+ m := NewGeoDistanceMetric(p.locodes)
+ metrics = append(metrics, m)
+ } else {
+ return nil, fmt.Errorf("unsupported priority metric %s", raw)
+ }
+ }
+ return metrics, nil
+}
+
+// attributeMetric describes a priority metric based on a node attribute.
+type attributeMetric struct {
+ attribute string
+}
+
+// CalculateValue returns 0 if both from and to carry the attributeMetric.attribute
+// attribute with equal values; otherwise it returns 1.
+func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
+ fromAttr := from.Attribute(am.attribute)
+ toAttr := to.Attribute(am.attribute)
+ if len(fromAttr) > 0 && len(toAttr) > 0 && fromAttr == toAttr {
+ return 0
+ }
+ return 1
+}
+
+func NewAttributeMetric(attr string) Metric {
+ return &attributeMetric{attribute: attr}
+}
+
+// geoDistanceMetric describes a priority metric based on geographic distance.
+type geoDistanceMetric struct {
+ locodes map[string]locodedb.Point
+ distance *atomic.Pointer[map[string]int]
+ mtx sync.Mutex
+}
+
+func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
+ d := atomic.Pointer[map[string]int]{}
+ m := make(map[string]int)
+ d.Store(&m)
+ gm := &geoDistanceMetric{
+ locodes: locodes,
+ distance: &d,
+ }
+ return gm
+}
+
+// CalculateValue returns the distance in kilometers between the current node and the
+// provided one if coordinates are known for both; otherwise it returns math.MaxInt.
+func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
+ fl := from.LOCODE()
+ tl := to.LOCODE()
+ if fl == tl {
+ return 0
+ }
+ m := gm.distance.Load()
+ if v, ok := (*m)[fl+tl]; ok {
+ return v
+ }
+ return gm.calculateDistance(fl, tl)
+}
+
+func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
+ gm.mtx.Lock()
+ defer gm.mtx.Unlock()
+ od := gm.distance.Load()
+ if v, ok := (*od)[from+to]; ok {
+ return v
+ }
+ nd := maps.Clone(*od)
+ var dist int
+ pointFrom, okFrom := gm.locodes[from]
+ pointTo, okTo := gm.locodes[to]
+ if okFrom && okTo {
+ dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
+ } else {
+ dist = math.MaxInt
+ }
+ nd[from+to] = dist
+ gm.distance.Store(&nd)
+
+ return dist
+}
+
+// distance returns the distance in kilometers between two points.
+// Parameters are latitude and longitude of point 1 and 2 in decimal degrees.
+// Original implementation can be found here https://www.geodatasource.com/developers/go.
+func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
+ radLat1 := math.Pi * lt1 / 180
+ radLat2 := math.Pi * lt2 / 180
+ radTheta := math.Pi * (ln1 - ln2) / 180
+
+ dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
+
+ if dist > 1 {
+ dist = 1
+ }
+
+ dist = math.Acos(dist)
+ dist = dist * 180 / math.Pi
+ dist = dist * 60 * 1.1515 * 1.609344
+
+ return dist
+}
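
The `distance` helper is the spherical law of cosines in degree form: convert to radians, take `acos` of the dot product of the two position vectors, then convert arc degrees to kilometers via nautical miles (`60 * 1.1515` statute miles per degree, `1.609344` km per statute mile). A quick smoke test, assuming the helper above is in scope (expected value approximate):

```go
package main

import "fmt"

func main() {
	// St. Petersburg (59.93N, 30.33E) to Moscow (55.75N, 37.62E):
	// the great-circle distance is roughly 634 km.
	fmt.Printf("%.0f km\n", distance(59.93, 30.33, 55.75, 37.62))
}
```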
diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go
index 01d607020d..b3f8d9c03f 100644
--- a/pkg/services/object_manager/placement/netmap.go
+++ b/pkg/services/object_manager/placement/netmap.go
@@ -1,25 +1,19 @@
package placement
import (
+ "context"
"crypto/sha256"
"fmt"
- "sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/hashicorp/golang-lru/v2/simplelru"
)
type netMapBuilder struct {
- nmSrc netmap.Source
- // mtx protects lastNm and containerCache fields.
- mtx sync.Mutex
- lastNm *netmapSDK.NetMap
- // containerCache caches container nodes by ID. It is used to skip `GetContainerNodes` invocation if
- // neither netmap nor container has changed.
- containerCache simplelru.LRUCache[string, [][]netmapSDK.NodeInfo]
+ nmSrc netmap.Source
+ containerCache *ContainerNodesCache
}
type netMapSrc struct {
@@ -28,59 +22,35 @@ type netMapSrc struct {
nm *netmapSDK.NetMap
}
-// defaultContainerCacheSize is the default size for the container cache.
-const defaultContainerCacheSize = 10
-
func NewNetworkMapBuilder(nm *netmapSDK.NetMap) Builder {
- cache, _ := simplelru.NewLRU[string, [][]netmapSDK.NodeInfo](defaultContainerCacheSize, nil) // no error
return &netMapBuilder{
nmSrc: &netMapSrc{nm: nm},
- containerCache: cache,
+ containerCache: NewContainerNodesCache(0),
}
}
func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
- cache, _ := simplelru.NewLRU[string, [][]netmapSDK.NodeInfo](defaultContainerCacheSize, nil) // no error
return &netMapBuilder{
nmSrc: nmSrc,
- containerCache: cache,
+ containerCache: NewContainerNodesCache(0),
}
}
-func (s *netMapSrc) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
+func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) {
return s.nm, nil
}
-func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
+func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc)
if err != nil {
return nil, fmt.Errorf("could not get network map: %w", err)
}
- binCnr := make([]byte, sha256.Size)
- cnr.Encode(binCnr)
-
- b.mtx.Lock()
- if nm == b.lastNm {
- raw, ok := b.containerCache.Get(string(binCnr))
- b.mtx.Unlock()
- if ok {
- return BuildObjectPlacement(nm, raw, obj)
- }
- } else {
- b.containerCache.Purge()
- b.mtx.Unlock()
- }
-
- cn, err := nm.ContainerNodes(p, binCnr)
+ cn, err := b.containerCache.ContainerNodes(nm, cnr, p)
if err != nil {
- return nil, fmt.Errorf("could not get container nodes: %w", err)
+ return nil, err
}
- b.mtx.Lock()
- b.containerCache.Add(string(binCnr), cn)
- b.mtx.Unlock()
-
return BuildObjectPlacement(nm, cn, obj)
}
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 75d5fbfd15..a3f9af959b 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -1,8 +1,10 @@
package placement
import (
+ "context"
"errors"
"fmt"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -20,7 +22,12 @@ type Builder interface {
//
// Must return all container nodes if object identifier
// is nil.
- BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
+ BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
+}
+
+type NodeState interface {
+ // LocalNodeInfo returns the current node state as a FrostFS API v2 NodeInfo structure.
+ LocalNodeInfo() *netmap.NodeInfo
}
// Option represents placement traverser option.
@@ -29,7 +36,7 @@ type Option func(*cfg)
// Traverser represents utility for controlling
// traversal of object placement vectors.
type Traverser struct {
- mtx *sync.RWMutex
+ mtx sync.RWMutex
vectors [][]netmap.NodeInfo
@@ -38,6 +45,7 @@ type Traverser struct {
type cfg struct {
trackCopies bool
+ copyNumbers []uint32
flatSuccess *uint32
@@ -49,6 +57,10 @@ type cfg struct {
policy netmap.PlacementPolicy
builder Builder
+
+ metrics []Metric
+
+ nodeState NodeState
}
const invalidOptsMsg = "invalid traverser options"
@@ -57,6 +69,9 @@ var errNilBuilder = errors.New("placement builder is nil")
var errNilPolicy = errors.New("placement policy is nil")
+var errCopiesNumberLen = errors.New("copies number accepts only one number or array with length " +
+ "equal to length of replicas")
+
func defaultCfg() *cfg {
return &cfg{
trackCopies: true,
@@ -64,7 +79,7 @@ func defaultCfg() *cfg {
}
// NewTraverser creates, initializes with options and returns Traverser instance.
-func NewTraverser(opts ...Option) (*Traverser, error) {
+func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
cfg := defaultCfg()
for i := range opts {
@@ -73,41 +88,85 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
}
}
+ cnLen := len(cfg.copyNumbers)
+ if cnLen > 0 && cnLen != 1 && cnLen != cfg.policy.NumberOfReplicas() {
+ return nil, errCopiesNumberLen
+ }
+
if cfg.builder == nil {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilBuilder)
} else if !cfg.policySet {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy)
}
- ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy)
+ ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy)
if err != nil {
return nil, fmt.Errorf("could not build placement: %w", err)
}
+ // backward compatibility for scalar `copies_number`
+ if len(cfg.copyNumbers) == 1 && cfg.copyNumbers[0] != 0 {
+ cfg.flatSuccess = &cfg.copyNumbers[0]
+ }
+
var rem []int
- if cfg.flatSuccess != nil {
+ if len(cfg.metrics) > 0 && cfg.nodeState != nil {
+ rem = defaultCopiesVector(cfg.policy)
+ var unsortedVector []netmap.NodeInfo
+ var regularVector []netmap.NodeInfo
+ for i := range rem {
+ pivot := min(len(ns[i]), rem[i])
+ unsortedVector = append(unsortedVector, ns[i][:pivot]...)
+ regularVector = append(regularVector, ns[i][pivot:]...)
+ }
+ rem = []int{-1, -1}
+
+ sortedVector := sortVector(cfg, unsortedVector)
+ ns = [][]netmap.NodeInfo{sortedVector, regularVector}
+ } else if cfg.flatSuccess != nil {
ns = flatNodes(ns)
rem = []int{int(*cfg.flatSuccess)}
} else {
- replNum := cfg.policy.NumberOfReplicas()
- rem = make([]int, 0, replNum)
+ rem = defaultCopiesVector(cfg.policy)
- for i := 0; i < replNum; i++ {
- if cfg.trackCopies {
- rem = append(rem, int(cfg.policy.ReplicaNumberByIndex(i)))
- } else {
- rem = append(rem, -1)
+ // considerCopiesNumber is set when cfg.copyNumbers contains at least one
+ // non-zero value. When all values are zero, the `rem` slice must be left
+ // intact (unless copies tracking is disabled), because [0, ...] means that
+ // all copies must be stored before returning OK to the client.
+ var considerCopiesNumber bool
+ for _, val := range cfg.copyNumbers {
+ if val != 0 {
+ considerCopiesNumber = true
+ break
+ }
+ }
+
+ for i := range rem {
+ if !cfg.trackCopies {
+ rem[i] = -1
+ } else if considerCopiesNumber && len(cfg.copyNumbers) > i {
+ rem[i] = int(cfg.copyNumbers[i])
}
}
}
return &Traverser{
- mtx: new(sync.RWMutex),
rem: rem,
vectors: ns,
}, nil
}
+func defaultCopiesVector(policy netmap.PlacementPolicy) []int {
+ replNum := policy.NumberOfReplicas()
+ copyVector := make([]int, 0, replNum)
+
+ for i := range replNum {
+ copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount()))
+ }
+
+ return copyVector
+}
+
func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo {
sz := 0
for i := range ns {
@@ -122,6 +181,35 @@ func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return [][]netmap.NodeInfo{flat}
}
+type nodeMetrics struct {
+ index int
+ metrics []int
+}
+
+func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo {
+ nm := make([]nodeMetrics, len(unsortedVector))
+ node := cfg.nodeState.LocalNodeInfo()
+
+ for i := range unsortedVector {
+ m := make([]int, len(cfg.metrics))
+ for j, pm := range cfg.metrics {
+ m[j] = pm.CalculateValue(node, &unsortedVector[i])
+ }
+ nm[i] = nodeMetrics{
+ index: i,
+ metrics: m,
+ }
+ }
+ slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
+ return slices.Compare(a.metrics, b.metrics)
+ })
+ sortedVector := make([]netmap.NodeInfo, len(unsortedVector))
+ for i := range unsortedVector {
+ sortedVector[i] = unsortedVector[nm[i].index]
+ }
+ return sortedVector
+}
+
// Node is a descriptor of storage node with information required for intra-container communication.
type Node struct {
addresses network.AddressGroup
@@ -146,6 +234,15 @@ func (x Node) PublicKey() []byte {
return x.key
}
+// NewNode creates new Node.
+func NewNode(addresses network.AddressGroup, externalAddresses network.AddressGroup, key []byte) Node {
+ return Node{
+ addresses: addresses,
+ externalAddresses: externalAddresses,
+ key: key,
+ }
+}
+
// Next returns next unprocessed address of the object placement.
//
// Returns nil if no nodes left or traversal operation succeeded.
@@ -168,7 +265,7 @@ func (t *Traverser) Next() []Node {
nodes := make([]Node, count)
- for i := 0; i < count; i++ {
+ for i := range count {
err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i]))
if err != nil {
return nil
@@ -191,8 +288,8 @@ func (t *Traverser) Next() []Node {
func (t *Traverser) skipEmptyVectors() {
for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
- t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
- t.rem = append(t.rem[:i], t.rem[i+1:]...)
+ t.vectors = slices.Delete(t.vectors, i, i+1)
+ t.rem = slices.Delete(t.rem, i, i+1)
i--
} else {
break
@@ -259,9 +356,36 @@ func SuccessAfter(v uint32) Option {
}
}
+// ResetSuccessAfter resets flat success number setting option.
+func ResetSuccessAfter() Option {
+ return func(c *cfg) {
+ c.flatSuccess = nil
+ }
+}
+
// WithoutSuccessTracking disables success tracking in traversal.
func WithoutSuccessTracking() Option {
return func(c *cfg) {
c.trackCopies = false
}
}
+
+func WithCopyNumbers(v []uint32) Option {
+ return func(c *cfg) {
+ c.copyNumbers = v
+ }
+}
+
+// WithPriorityMetrics uses the provided priority metrics to sort nodes.
+func WithPriorityMetrics(m []Metric) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
+
+// WithNodeState provides the state of the current node.
+func WithNodeState(s NodeState) Option {
+ return func(c *cfg) {
+ c.nodeState = s
+ }
+}
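
Putting the new options together: priority metrics plus a node state sort the first batch of candidate nodes by proximity to the local node, while copy numbers tune the per-replica success counters. A hedged construction sketch using only options defined above (the helper and its import aliases are illustrative):

```go
package placementexample

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
)

func newSortedTraverser(ctx context.Context, cnr containerSDK.Container,
	b placement.Builder, ms []placement.Metric, st placement.NodeState,
) (*placement.Traverser, error) {
	return placement.NewTraverser(ctx,
		placement.ForContainer(cnr),       // placement policy comes from the container
		placement.UseBuilder(b),           // source of placement vectors
		placement.WithPriorityMetrics(ms), // e.g. NewAttributeMetric("ClusterName")
		placement.WithNodeState(st),       // metrics are computed relative to this node
	)
}
```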
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index 66fd8afe07..d1370f21e2 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -1,6 +1,8 @@
package placement
import (
+ "context"
+ "slices"
"strconv"
"testing"
@@ -17,12 +19,14 @@ type testBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return b.vectors, nil
}
func testNode(v uint32) (n netmap.NodeInfo) {
- n.SetNetworkEndpoints("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)))
+ ip := "/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v))
+ n.SetNetworkEndpoints(ip)
+ n.SetPublicKey([]byte(ip))
return n
}
@@ -31,8 +35,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
vc := make([][]netmap.NodeInfo, 0, len(v))
for i := range v {
- ns := make([]netmap.NodeInfo, len(v[i]))
- copy(ns, v[i])
+ ns := slices.Clone(v[i])
vc = append(vc, ns)
}
@@ -40,7 +43,15 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return vc
}
-func testPlacement(t *testing.T, ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
+func testPlacement(ss []int, rs []int) ([][]netmap.NodeInfo, container.Container) {
+ return placement(ss, rs, nil)
+}
+
+func testECPlacement(ss []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
+ return placement(ss, nil, ec)
+}
+
+func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
nodes := make([][]netmap.NodeInfo, 0, len(rs))
replicas := make([]netmap.ReplicaDescriptor, 0, len(rs))
num := uint32(0)
@@ -48,7 +59,7 @@ func testPlacement(t *testing.T, ss, rs []int) ([][]netmap.NodeInfo, container.C
for i := range ss {
ns := make([]netmap.NodeInfo, 0, ss[i])
- for j := 0; j < ss[i]; j++ {
+ for range ss[i] {
ns = append(ns, testNode(num))
num++
}
@@ -56,7 +67,12 @@ func testPlacement(t *testing.T, ss, rs []int) ([][]netmap.NodeInfo, container.C
nodes = append(nodes, ns)
var rd netmap.ReplicaDescriptor
- rd.SetNumberOfObjects(uint32(rs[i]))
+ if len(rs) > 0 {
+ rd.SetNumberOfObjects(uint32(rs[i]))
+ } else {
+ rd.SetECDataCount(uint32(ec[i][0]))
+ rd.SetECParityCount(uint32(ec[i][1]))
+ }
replicas = append(replicas, rd)
}
@@ -83,11 +99,11 @@ func TestTraverserObjectScenarios(t *testing.T) {
selectors := []int{2, 3}
replicas := []int{1, 2}
- nodes, cnr := testPlacement(t, selectors, replicas)
+ nodes, cnr := testPlacement(selectors, replicas)
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithoutSuccessTracking(),
@@ -112,11 +128,11 @@ func TestTraverserObjectScenarios(t *testing.T) {
selectors := []int{5, 3}
replicas := []int{2, 2}
- nodes, cnr := testPlacement(t, selectors, replicas)
+ nodes, cnr := testPlacement(selectors, replicas)
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -125,7 +141,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
)
require.NoError(t, err)
- for i := 0; i < len(nodes[0]); i++ {
+ for range len(nodes[0]) {
require.NotNil(t, tr.Next())
}
@@ -134,18 +150,18 @@ func TestTraverserObjectScenarios(t *testing.T) {
err = n.FromIterator(netmapcore.Node(nodes[1][0]))
require.NoError(t, err)
- require.Equal(t, []Node{{addresses: n}}, tr.Next())
+ require.Equal(t, []Node{{addresses: n, key: []byte("/ip4/0.0.0.0/tcp/5")}}, tr.Next())
})
t.Run("put scenario", func(t *testing.T) {
selectors := []int{5, 3}
replicas := []int{2, 2}
- nodes, cnr := testPlacement(t, selectors, replicas)
+ nodes, cnr := testPlacement(selectors, replicas)
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
)
@@ -164,7 +180,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
require.Empty(t, tr.Next())
require.False(t, tr.Success())
- for i := 0; i < replicas[curVector]; i++ {
+ for range replicas[curVector] {
tr.SubmitSuccess()
}
}
@@ -184,9 +200,9 @@ func TestTraverserObjectScenarios(t *testing.T) {
selectors := []int{2, 3}
replicas := []int{1, 2}
- nodes, cnr := testPlacement(t, selectors, replicas)
+ nodes, cnr := testPlacement(selectors, replicas)
- tr, err := NewTraverser(
+ tr, err := NewTraverser(context.Background(),
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local)
@@ -208,3 +224,430 @@ func TestTraverserObjectScenarios(t *testing.T) {
require.True(t, tr.Success())
})
}
+
+func TestTraverserRemValues(t *testing.T) {
+ selectors := []int{3, 4, 5}
+ replicas := []int{2, 3, 4}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+ nodesCopy := copyVectors(nodes)
+
+ testCases := [...]struct {
+ name string
+ copyNumbers []uint32
+ expectedRem []int
+ expectedErr error
+ }{
+ {
+ name: "zero copy numbers",
+ copyNumbers: []uint32{},
+ expectedRem: replicas,
+ },
+ {
+ name: "compatible zero copy numbers, len 1",
+ copyNumbers: []uint32{0},
+ expectedRem: replicas,
+ },
+ {
+ name: "compatible zero copy numbers, len 3",
+ copyNumbers: []uint32{0, 0, 0},
+ expectedRem: replicas,
+ },
+ {
+ name: "copy numbers for all replicas",
+ copyNumbers: []uint32{1, 1, 1},
+ expectedRem: []int{1, 1, 1},
+ },
+ {
+ name: "single copy numbers for multiple replicas",
+ copyNumbers: []uint32{1},
+ expectedRem: []int{1}, // a single non-zero value switches to flat success tracking
+ },
+ {
+ name: "multiple copy numbers for multiple replicas",
+ copyNumbers: []uint32{1, 1, 4},
+ expectedRem: []int{1, 1, 4},
+ },
+ {
+ name: "incompatible copies number vector",
+ copyNumbers: []uint32{1, 1},
+ expectedErr: errCopiesNumberLen,
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{vectors: nodesCopy}),
+ WithCopyNumbers(testCase.copyNumbers),
+ )
+ if testCase.expectedErr == nil {
+ require.NoError(t, err, testCase.name)
+ require.Equal(t, testCase.expectedRem, tr.rem, testCase.name)
+ } else {
+ require.ErrorIs(t, err, testCase.expectedErr, testCase.name)
+ }
+ })
+ }
+}
+
+type nodeState struct {
+ node *netmap.NodeInfo
+}
+
+func (n *nodeState) LocalNodeInfo() *netmap.NodeInfo {
+ return n.node
+}
+
+func TestTraverserPriorityMetrics(t *testing.T) {
+ t.Run("one rep one metric", func(t *testing.T) {
+ selectors := []int{4}
+ replicas := []int{3}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "A")
+ // Node_2, PK - ip4/0.0.0.0/tcp/2
+ nodes[0][2].SetAttribute("ClusterName", "B")
+ // Node_3, PK - ip4/0.0.0.0/tcp/3
+ nodes[0][3].SetAttribute("ClusterName", "B")
+
+ sdkNode := testNode(5)
+ sdkNode.SetAttribute("ClusterName", "B")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{NewAttributeMetric("ClusterName")}
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `ClusterName` the order will be:
+ // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
+ // With priority metric `ClusterName` and current node in cluster B
+ // the order should be:
+ // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 3, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
+
+ next = tr.Next()
+ // The last node, Node_3, comes alone in the second batch.
+ require.Equal(t, 1, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("one rep one metric fewer nodes", func(t *testing.T) {
+ selectors := []int{2}
+ replicas := []int{3}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "B")
+
+ sdkNode := testNode(5)
+ sdkNode.SetAttribute("ClusterName", "B")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{NewAttributeMetric("ClusterName")}
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `ClusterName` the order will be:
+ // [ {Node_0 A}, {Node_1 A} ]
+ // With priority metric `ClusterName` and current node in cluster B
+ // the order should be:
+ // [ {Node_1 B}, {Node_0 A} ]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("two reps two metrics", func(t *testing.T) {
+ selectors := []int{3, 3}
+ replicas := []int{2, 2}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // REPLICA #1
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ nodes[0][0].SetAttribute("UN-LOCODE", "RU LED")
+
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "A")
+ nodes[0][1].SetAttribute("UN-LOCODE", "FI HEL")
+
+ // Node_2, PK - ip4/0.0.0.0/tcp/2
+ nodes[0][2].SetAttribute("ClusterName", "A")
+ nodes[0][2].SetAttribute("UN-LOCODE", "RU LED")
+
+ // REPLICA #2
+ // Node_3 ip4/0.0.0.0/tcp/3
+ nodes[1][0].SetAttribute("ClusterName", "B")
+ nodes[1][0].SetAttribute("UN-LOCODE", "RU MOW")
+
+ // Node_4, PK - ip4/0.0.0.0/tcp/4
+ nodes[1][1].SetAttribute("ClusterName", "B")
+ nodes[1][1].SetAttribute("UN-LOCODE", "RU DME")
+
+ // Node_5, PK - ip4/0.0.0.0/tcp/5
+ nodes[1][2].SetAttribute("ClusterName", "B")
+ nodes[1][2].SetAttribute("UN-LOCODE", "RU MOW")
+
+ sdkNode := testNode(9)
+ sdkNode.SetAttribute("ClusterName", "B")
+ sdkNode.SetAttribute("UN-LOCODE", "RU DME")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{
+ NewAttributeMetric("ClusterName"),
+ NewAttributeMetric("UN-LOCODE"),
+ }
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Check that nodes in the same cluster and
+ // in the same location come first in the slice.
+ // Nodes that match the criteria but are outside the replica
+ // should be in the next slice.
+
+ next := tr.Next()
+ require.Equal(t, 4, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
+
+ next = tr.Next()
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+
+ sdkNode.SetAttribute("ClusterName", "B")
+ sdkNode.SetAttribute("UN-LOCODE", "RU MOW")
+
+ nodesCopy = copyVectors(nodes)
+
+ tr, err = NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
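+ // Now the current node shares both metrics with Node_3 (cluster B,
+ // UN-LOCODE RU MOW) and only the cluster with Node_4, so Node_3 comes first.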
+ next = tr.Next()
+ require.Equal(t, 4, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
+
+ next = tr.Next()
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+
+ sdkNode.SetAttribute("ClusterName", "A")
+ sdkNode.SetAttribute("UN-LOCODE", "RU LED")
+
+ nodesCopy = copyVectors(nodes)
+
+ tr, err = NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
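+ // Now the current node shares both metrics with Node_0 (cluster A,
+ // UN-LOCODE RU LED) and only the cluster with Node_1, so the first
+ // vector's nodes lead.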
+ next = tr.Next()
+ require.Equal(t, 4, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[2].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[3].PublicKey()))
+
+ next = tr.Next()
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("ec container", func(t *testing.T) {
+ selectors := []int{4}
+ ec := [][]int{{2, 1}}
+
+ nodes, cnr := testECPlacement(selectors, ec)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("ClusterName", "A")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("ClusterName", "A")
+ // Node_2, PK - ip4/0.0.0.0/tcp/2
+ nodes[0][2].SetAttribute("ClusterName", "B")
+ // Node_3, PK - ip4/0.0.0.0/tcp/3
+ nodes[0][3].SetAttribute("ClusterName", "B")
+
+ sdkNode := testNode(5)
+ sdkNode.SetAttribute("ClusterName", "B")
+
+ nodesCopy := copyVectors(nodes)
+
+ m := []Metric{NewAttributeMetric("ClusterName")}
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `ClusterName` the order will be:
+ // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
+ // With priority metric `ClusterName` and current node in cluster B
+ // the order should be:
+ // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 3, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
+
+ next = tr.Next()
+ // The last node, Node_3, is returned in the second batch.
+ require.Equal(t, 1, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+
+ t.Run("one rep one geo metric", func(t *testing.T) {
+ t.Skip("requires a real locode database; the path below is a placeholder")
+ selectors := []int{2}
+ replicas := []int{2}
+
+ nodes, cnr := testPlacement(selectors, replicas)
+
+ // Node_0, PK - ip4/0.0.0.0/tcp/0
+ nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
+ // Node_1, PK - ip4/0.0.0.0/tcp/1
+ nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
+
+ sdkNode := testNode(2)
+ sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
+
+ nodesCopy := copyVectors(nodes)
+
+ parser, err := NewMetricsParser("/path/to/locode_db")
+ require.NoError(t, err)
+ m, err := parser.ParseMetrics([]string{geoDistance})
+ require.NoError(t, err)
+
+ tr, err := NewTraverser(context.Background(),
+ ForContainer(cnr),
+ UseBuilder(&testBuilder{
+ vectors: nodesCopy,
+ }),
+ WithoutSuccessTracking(),
+ WithPriorityMetrics(m),
+ WithNodeState(&nodeState{
+ node: &sdkNode,
+ }),
+ )
+ require.NoError(t, err)
+
+ // Without priority metric `$geoDistance` the order will be:
+ // [ {Node_0 RU MOW}, {Node_1 RU LED}]
+ // With priority metric `$geoDistance` the order should be:
+ // [ {Node_1 RU LED}, {Node_0 RU MOW}]
+ next := tr.Next()
+ require.NotNil(t, next)
+ require.Equal(t, 2, len(next))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+ require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+ next = tr.Next()
+ require.Nil(t, next)
+ })
+}
diff --git a/pkg/services/object_manager/storagegroup/collect.go b/pkg/services/object_manager/storagegroup/collect.go
deleted file mode 100644
index d9578dea3f..0000000000
--- a/pkg/services/object_manager/storagegroup/collect.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package storagegroup
-
-import (
- objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
-)
-
-// CollectMembers creates new storage group structure and fills it
-// with information about members collected via HeadReceiver.
-//
-// Resulting storage group consists of physically stored objects only.
-func CollectMembers(r objutil.HeadReceiver, cnr cid.ID, members []oid.ID, calcHomoHash bool) (*storagegroup.StorageGroup, error) {
- var (
- sumPhySize uint64
- phyMembers []oid.ID
- phyHashes [][]byte
- addr oid.Address
- sg storagegroup.StorageGroup
- )
-
- addr.SetContainer(cnr)
-
- for i := range members {
- addr.SetObject(members[i])
-
- if err := objutil.IterateAllSplitLeaves(r, addr, func(leaf *object.Object) {
- id, ok := leaf.ID()
- if !ok {
- return
- }
-
- phyMembers = append(phyMembers, id)
- sumPhySize += leaf.PayloadSize()
- cs, _ := leaf.PayloadHomomorphicHash()
-
- if calcHomoHash {
- phyHashes = append(phyHashes, cs.Value())
- }
- }); err != nil {
- return nil, err
- }
- }
-
- sg.SetMembers(phyMembers)
- sg.SetValidationDataSize(sumPhySize)
-
- if calcHomoHash {
- sumHash, err := tz.Concat(phyHashes)
- if err != nil {
- return nil, err
- }
-
- var cs checksum.Checksum
- tzHash := [64]byte{}
- copy(tzHash[:], sumHash)
- cs.SetTillichZemor(tzHash)
-
- sg.SetValidationDataHash(cs)
- }
-
- return &sg, nil
-}
diff --git a/pkg/services/object_manager/storagegroup/search.go b/pkg/services/object_manager/storagegroup/search.go
deleted file mode 100644
index 39019aa6de..0000000000
--- a/pkg/services/object_manager/storagegroup/search.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package storagegroup
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-)
-
-// SearchQuery returns search query to filter
-// objects with storage group content.
-func SearchQuery() object.SearchFilters {
- fs := object.SearchFilters{}
- fs.AddTypeFilter(object.MatchStringEqual, object.TypeStorageGroup)
-
- return fs
-}
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index 379dad0f5c..e5f001d5a2 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -4,9 +4,10 @@ import (
"context"
"strconv"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
@@ -20,7 +21,7 @@ type Source interface {
//
// Tombstone MUST return (nil, nil) if requested tombstone is
// missing in the storage for the provided epoch.
- Tombstone(ctx context.Context, a oid.Address, epoch uint64) (*object.Object, error)
+ Tombstone(ctx context.Context, a oid.Address, epoch uint64) (*objectSDK.Object, error)
}
// ExpirationChecker is a tombstone source wrapper.
@@ -56,14 +57,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
ts, err := g.tsSource.Tombstone(ctx, a, epoch)
if err != nil {
- log.Warn(
- "tombstone getter: could not get the tombstone the source",
+ log.Warn(ctx,
+ logs.TombstoneCouldNotGetTheTombstoneTheSource,
zap.Error(err),
)
- } else {
- if ts != nil {
- return g.handleTS(addrStr, ts, epoch)
- }
+ } else if ts != nil {
+ return g.handleTS(ctx, addrStr, ts, epoch)
}
// requested tombstone not
@@ -71,13 +70,13 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
return false
}
-func (g *ExpirationChecker) handleTS(addr string, ts *object.Object, reqEpoch uint64) bool {
+func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
if atr.Key() == objectV2.SysAttributeExpEpoch {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
- g.log.Warn(
- "tombstone getter: could not parse tombstone expiration epoch",
+ g.log.Warn(ctx,
+ logs.TombstoneExpirationParseFailure,
zap.Error(err),
)
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 9d33e8179b..2147a32fe8 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -3,6 +3,7 @@ package tombstone
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
@@ -23,7 +24,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- log: &logger.Logger{Logger: zap.NewNop()},
+ log: logger.NewLoggerWrapper(zap.NewNop()),
cacheSize: defaultLRUCacheSize,
}
}
@@ -49,9 +50,7 @@ func NewChecker(oo ...Option) *ExpirationChecker {
panicOnNil(cfg.tsSource, "Tombstone source")
cache, err := lru.New[string, uint64](cfg.cacheSize)
- if err != nil {
- panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
- }
+ assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize))
return &ExpirationChecker{
cache: cache,
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 8dd0dcabb0..9759418471 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -2,13 +2,12 @@ package tsourse
import (
"context"
- "errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -40,9 +39,7 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
// Panics if any of the provided options does not allow
// constructing a valid tombstone local Source.
func NewSource(p TombstoneSourcePrm) Source {
- if p.s == nil {
- panic("Tombstone source: nil object service")
- }
+ assert.False(p.s == nil, "Tombstone source: nil object service")
return Source(p)
}
@@ -59,7 +56,7 @@ func (h *headerWriter) WriteHeader(_ context.Context, o *objectSDK.Object) error
// Tombstone checks if the engine stores tombstone.
// Returns nil, nil if the tombstone has been removed
// or marked for removal.
-func (s Source) Tombstone(ctx context.Context, a oid.Address, _ uint64) (*object.Object, error) {
+func (s Source) Tombstone(ctx context.Context, a oid.Address, _ uint64) (*objectSDK.Object, error) {
var hr headerWriter
var headPrm getsvc.HeadPrm
@@ -69,7 +66,7 @@ func (s Source) Tombstone(ctx context.Context, a oid.Address, _ uint64) (*object
err := s.s.Head(ctx, headPrm)
switch {
- case errors.As(err, new(apistatus.ObjectNotFound)) || errors.As(err, new(apistatus.ObjectAlreadyRemoved)):
+ case client.IsErrObjectNotFound(err) || client.IsErrObjectAlreadyRemoved(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("could not get tombstone from the source: %w", err)
diff --git a/pkg/services/object_manager/transformer/fmt.go b/pkg/services/object_manager/transformer/fmt.go
deleted file mode 100644
index c9b5dc9672..0000000000
--- a/pkg/services/object_manager/transformer/fmt.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package transformer
-
-import (
- "crypto/ecdsa"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
-)
-
-type formatter struct {
- prm *FormatterParams
-
- obj *object.Object
-
- sz uint64
-}
-
-// FormatterParams groups NewFormatTarget parameters.
-type FormatterParams struct {
- Key *ecdsa.PrivateKey
-
- NextTarget ObjectTarget
-
- SessionToken *session.Object
-
- NetworkState netmap.State
-}
-
-// NewFormatTarget returns ObjectTarget instance that finalizes object structure
-// and writes it to the next target.
-//
-// Chunks must be written before the WriteHeader call.
-//
-// Object changes:
-// - sets version to current SDK version;
-// - sets payload size to the total length of all written chunks;
-// - sets session token;
-// - sets number of creation epoch;
-// - calculates and sets verification fields (ID, Signature).
-func NewFormatTarget(p *FormatterParams) ObjectTarget {
- return &formatter{
- prm: p,
- }
-}
-
-func (f *formatter) WriteHeader(obj *object.Object) error {
- f.obj = obj
-
- return nil
-}
-
-func (f *formatter) Write(p []byte) (n int, err error) {
- n, err = f.prm.NextTarget.Write(p)
-
- f.sz += uint64(n)
-
- return
-}
-
-func (f *formatter) Close() (*AccessIdentifiers, error) {
- curEpoch := f.prm.NetworkState.CurrentEpoch()
- ver := version.Current()
-
- f.obj.SetVersion(&ver)
- f.obj.SetPayloadSize(f.sz)
- f.obj.SetSessionToken(f.prm.SessionToken)
- f.obj.SetCreationEpoch(curEpoch)
-
- var (
- parID *oid.ID
- parHdr *object.Object
- )
-
- if par := f.obj.Parent(); par != nil && par.Signature() == nil {
- rawPar := object.NewFromV2(par.ToV2())
-
- rawPar.SetSessionToken(f.prm.SessionToken)
- rawPar.SetCreationEpoch(curEpoch)
-
- if err := object.SetIDWithSignature(*f.prm.Key, rawPar); err != nil {
- return nil, fmt.Errorf("could not finalize parent object: %w", err)
- }
-
- id, _ := rawPar.ID()
- parID = &id
- parHdr = rawPar
-
- f.obj.SetParent(parHdr)
- }
-
- if err := object.SetIDWithSignature(*f.prm.Key, f.obj); err != nil {
- return nil, fmt.Errorf("could not finalize object: %w", err)
- }
-
- if err := f.prm.NextTarget.WriteHeader(f.obj); err != nil {
- return nil, fmt.Errorf("could not write header to next target: %w", err)
- }
-
- if _, err := f.prm.NextTarget.Close(); err != nil {
- return nil, fmt.Errorf("could not close next target: %w", err)
- }
-
- id, _ := f.obj.ID()
-
- return new(AccessIdentifiers).
- WithSelfID(id).
- WithParentID(parID).
- WithParent(parHdr), nil
-}
diff --git a/pkg/services/object_manager/transformer/transformer.go b/pkg/services/object_manager/transformer/transformer.go
deleted file mode 100644
index 7b717d3dff..0000000000
--- a/pkg/services/object_manager/transformer/transformer.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package transformer
-
-import (
- "crypto/sha256"
- "fmt"
- "hash"
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
-)
-
-type payloadSizeLimiter struct {
- maxSize, written uint64
-
- withoutHomomorphicHash bool
-
- targetInit func() ObjectTarget
-
- target ObjectTarget
-
- current, parent *object.Object
-
- currentHashers, parentHashers []*payloadChecksumHasher
-
- previous []oid.ID
-
- chunkWriter io.Writer
-
- splitID *object.SplitID
-
- parAttrs []object.Attribute
-}
-
-type payloadChecksumHasher struct {
- hasher hash.Hash
-
- checksumWriter func([]byte)
-}
-
-// NewPayloadSizeLimiter returns ObjectTarget instance that restricts payload length
-// of the writing object and writes generated objects to targets from initializer.
-//
-// Calculates and adds homomorphic hash to resulting objects only if withoutHomomorphicHash
-// is false.
-//
-// Objects w/ payload size less or equal than max size remain untouched.
-func NewPayloadSizeLimiter(maxSize uint64, withoutHomomorphicHash bool, targetInit TargetInitializer) ObjectTarget {
- return &payloadSizeLimiter{
- maxSize: maxSize,
- withoutHomomorphicHash: withoutHomomorphicHash,
- targetInit: targetInit,
- splitID: object.NewSplitID(),
- }
-}
-
-func (s *payloadSizeLimiter) WriteHeader(hdr *object.Object) error {
- s.current = fromObject(hdr)
-
- s.initialize()
-
- return nil
-}
-
-func (s *payloadSizeLimiter) Write(p []byte) (int, error) {
- if err := s.writeChunk(p); err != nil {
- return 0, err
- }
-
- return len(p), nil
-}
-
-func (s *payloadSizeLimiter) Close() (*AccessIdentifiers, error) {
- return s.release(true)
-}
-
-func (s *payloadSizeLimiter) initialize() {
- // if it is an object after the 1st
- if ln := len(s.previous); ln > 0 {
- // initialize parent object once (after 1st object)
- if ln == 1 {
- s.detachParent()
- }
-
- // set previous object to the last previous identifier
- s.current.SetPreviousID(s.previous[ln-1])
- }
-
- s.initializeCurrent()
-}
-
-func fromObject(obj *object.Object) *object.Object {
- cnr, _ := obj.ContainerID()
-
- res := object.New()
- res.SetContainerID(cnr)
- res.SetOwnerID(obj.OwnerID())
- res.SetAttributes(obj.Attributes()...)
- res.SetType(obj.Type())
-
- // obj.SetSplitID creates splitHeader but we don't need to do it in case
- // of small objects, so we should make nil check.
- if obj.SplitID() != nil {
- res.SetSplitID(obj.SplitID())
- }
-
- return res
-}
-
-func (s *payloadSizeLimiter) initializeCurrent() {
- // initialize current object target
- s.target = s.targetInit()
-
- // create payload hashers
- s.currentHashers = payloadHashersForObject(s.current, s.withoutHomomorphicHash)
-
- // compose multi-writer from target and all payload hashers
- ws := make([]io.Writer, 0, 1+len(s.currentHashers)+len(s.parentHashers))
-
- ws = append(ws, s.target)
-
- for i := range s.currentHashers {
- ws = append(ws, s.currentHashers[i].hasher)
- }
-
- for i := range s.parentHashers {
- ws = append(ws, s.parentHashers[i].hasher)
- }
-
- s.chunkWriter = io.MultiWriter(ws...)
-}
-
-func payloadHashersForObject(obj *object.Object, withoutHomomorphicHash bool) []*payloadChecksumHasher {
- hashers := make([]*payloadChecksumHasher, 0, 2)
-
- hashers = append(hashers, &payloadChecksumHasher{
- hasher: sha256.New(),
- checksumWriter: func(binChecksum []byte) {
- if ln := len(binChecksum); ln != sha256.Size {
- panic(fmt.Sprintf("wrong checksum length: expected %d, has %d", sha256.Size, ln))
- }
-
- csSHA := [sha256.Size]byte{}
- copy(csSHA[:], binChecksum)
-
- var cs checksum.Checksum
- cs.SetSHA256(csSHA)
-
- obj.SetPayloadChecksum(cs)
- },
- })
-
- if !withoutHomomorphicHash {
- hashers = append(hashers, &payloadChecksumHasher{
- hasher: tz.New(),
- checksumWriter: func(binChecksum []byte) {
- if ln := len(binChecksum); ln != tz.Size {
- panic(fmt.Sprintf("wrong checksum length: expected %d, has %d", tz.Size, ln))
- }
-
- csTZ := [tz.Size]byte{}
- copy(csTZ[:], binChecksum)
-
- var cs checksum.Checksum
- cs.SetTillichZemor(csTZ)
-
- obj.SetPayloadHomomorphicHash(cs)
- },
- })
- }
-
- return hashers
-}
-
-func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error) {
- // Arg finalize is true only when called from Close method.
- // We finalize parent and generate linking objects only if it is more
- // than 1 object in split-chain.
- withParent := finalize && len(s.previous) > 0
-
- if withParent {
- writeHashes(s.parentHashers)
- s.parent.SetPayloadSize(s.written)
- s.current.SetParent(s.parent)
- }
-
- // release current object
- writeHashes(s.currentHashers)
-
- // release current, get its id
- if err := s.target.WriteHeader(s.current); err != nil {
- return nil, fmt.Errorf("could not write header: %w", err)
- }
-
- ids, err := s.target.Close()
- if err != nil {
- return nil, fmt.Errorf("could not close target: %w", err)
- }
-
- // save identifier of the released object
- s.previous = append(s.previous, ids.SelfID())
-
- if withParent {
- // generate and release linking object
- s.initializeLinking(ids.Parent())
- s.initializeCurrent()
-
- if _, err := s.release(false); err != nil {
- return nil, fmt.Errorf("could not release linking object: %w", err)
- }
- }
-
- return ids, nil
-}
-
-func writeHashes(hashers []*payloadChecksumHasher) {
- for i := range hashers {
- hashers[i].checksumWriter(hashers[i].hasher.Sum(nil))
- }
-}
-
-func (s *payloadSizeLimiter) initializeLinking(parHdr *object.Object) {
- s.current = fromObject(s.current)
- s.current.SetParent(parHdr)
- s.current.SetChildren(s.previous...)
- s.current.SetSplitID(s.splitID)
-}
-
-func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
- // statement is true if the previous write of bytes reached exactly the boundary.
- if s.written > 0 && s.written%s.maxSize == 0 {
- if s.written == s.maxSize {
- s.prepareFirstChild()
- }
-
- // we need to release current object
- if _, err := s.release(false); err != nil {
- return fmt.Errorf("could not release object: %w", err)
- }
-
- // initialize another object
- s.initialize()
- }
-
- var (
- ln = uint64(len(chunk))
- cut = ln
- leftToEdge = s.maxSize - s.written%s.maxSize
- )
-
- // write bytes no further than the boundary of the current object
- if ln > leftToEdge {
- cut = leftToEdge
- }
-
- if _, err := s.chunkWriter.Write(chunk[:cut]); err != nil {
- return fmt.Errorf("could not write chunk to target: %w", err)
- }
-
- // increase written bytes counter
- s.written += cut
-
- // if there are more bytes in buffer we call method again to start filling another object
- if ln > leftToEdge {
- return s.writeChunk(chunk[cut:])
- }
-
- return nil
-}
-
-func (s *payloadSizeLimiter) prepareFirstChild() {
- // initialize split header with split ID on first object in chain
- s.current.InitRelations()
- s.current.SetSplitID(s.splitID)
-
- // cut source attributes
- s.parAttrs = s.current.Attributes()
- s.current.SetAttributes()
-
- // attributes will be added to parent in detachParent
-}
-
-func (s *payloadSizeLimiter) detachParent() {
- s.parent = s.current
- s.current = fromObject(s.parent)
- s.parent.ResetRelations()
- s.parent.SetSignature(nil)
- s.parentHashers = s.currentHashers
-
- // return source attributes
- s.parent.SetAttributes(s.parAttrs...)
-}
diff --git a/pkg/services/object_manager/transformer/types.go b/pkg/services/object_manager/transformer/types.go
deleted file mode 100644
index 0fa3b64364..0000000000
--- a/pkg/services/object_manager/transformer/types.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package transformer
-
-import (
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// AccessIdentifiers represents group of the object identifiers
-// that are returned after writing the object.
-// Consists of the ID of the stored object and the ID of the parent object.
-type AccessIdentifiers struct {
- par *oid.ID
-
- self oid.ID
-
- parHdr *object.Object
-}
-
-// ObjectTarget is an interface of the object writer.
-type ObjectTarget interface {
- // WriteHeader writes object header w/ payload part.
- // The payload of the object may be incomplete.
- //
- // Must be called exactly once. Control remains with the caller.
- // Missing a call or re-calling can lead to undefined behavior
- // that depends on the implementation.
- //
- // Must not be called after Close call.
- WriteHeader(*object.Object) error
-
- // Write writes object payload chunk.
- //
- // Can be called multiple times.
- //
- // Must not be called after Close call.
- io.Writer
-
- // Close is used to finish object writing.
- //
- // Close must return access identifiers of the object
- // that has been written.
- //
- // Must be called no more than once. Control remains with the caller.
- // Re-calling can lead to undefined behavior
- // that depends on the implementation.
- Close() (*AccessIdentifiers, error)
-}
-
-// TargetInitializer represents ObjectTarget constructor.
-type TargetInitializer func() ObjectTarget
-
-// SelfID returns identifier of the written object.
-func (a AccessIdentifiers) SelfID() oid.ID {
- return a.self
-}
-
-// WithSelfID returns AccessIdentifiers with passed self identifier.
-func (a *AccessIdentifiers) WithSelfID(v oid.ID) *AccessIdentifiers {
- res := a
- if res == nil {
- res = new(AccessIdentifiers)
- }
-
- res.self = v
-
- return res
-}
-
-// ParentID return identifier of the parent of the written object.
-func (a *AccessIdentifiers) ParentID() *oid.ID {
- if a != nil {
- return a.par
- }
-
- return nil
-}
-
-// WithParentID returns AccessIdentifiers with passed parent identifier.
-func (a *AccessIdentifiers) WithParentID(v *oid.ID) *AccessIdentifiers {
- res := a
- if res == nil {
- res = new(AccessIdentifiers)
- }
-
- res.par = v
-
- return res
-}
-
-// Parent return identifier of the parent of the written object.
-func (a *AccessIdentifiers) Parent() *object.Object {
- if a != nil {
- return a.parHdr
- }
-
- return nil
-}
-
-// WithParent returns AccessIdentifiers with passed parent identifier.
-func (a *AccessIdentifiers) WithParent(v *object.Object) *AccessIdentifiers {
- res := a
- if res == nil {
- res = new(AccessIdentifiers)
- }
-
- res.parHdr = v
-
- return res
-}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 1da07f45dc..dcaaec0b44 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -2,117 +2,66 @@ package policer
import (
"context"
- "errors"
+ "fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- headsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/head"
+ policycore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
-// tracks Policer's check progress.
-type nodeCache map[uint64]bool
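+// processObject checks whether the placement of a single object satisfies
+// the container storage policy and schedules replication or garbage
+// collection if it does not.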
+func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Policer.ProcessObject", trace.WithAttributes(
+ attribute.String("address", objInfo.Address.String()),
+ attribute.Bool("is_linking_object", objInfo.IsLinkingObject),
+ attribute.Bool("is_ec_part", objInfo.ECInfo != nil),
+ attribute.String("type", objInfo.Type.String()),
+ ))
+ defer span.End()
-func newNodeCache() *nodeCache {
- m := make(map[uint64]bool)
- return (*nodeCache)(&m)
-}
-
-func (n *nodeCache) set(node netmap.NodeInfo, val bool) {
- (*n)[node.Hash()] = val
-}
-
-// submits storage node as a candidate to store the object replica in case of
-// shortage.
-func (n *nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
- n.set(node, false)
-}
-
-// submits storage node as a current object replica holder.
-func (n *nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
- n.set(node, true)
-}
-
-// processStatus returns current processing status of the storage node
-//
-// >0 if node does not currently hold the object
-// 0 if node already holds the object
-// <0 if node has not been processed yet
-func (n *nodeCache) processStatus(node netmap.NodeInfo) int8 {
- val, ok := (*n)[node.Hash()]
- if !ok {
- return -1
- }
-
- if val {
- return 0
- }
-
- return 1
-}
-
-// SubmitSuccessfulReplication marks given storage node as a current object
-// replica holder.
-//
-// SubmitSuccessfulReplication implements replicator.TaskResult.
-func (n *nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
- n.submitReplicaHolder(node)
-}
-
-func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.AddressWithType) {
- addr := addrWithType.Address
- idCnr := addr.Container()
- idObj := addr.Object()
-
- cnr, err := p.cnrSrc.Get(idCnr)
+ cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container())
if err != nil {
- p.log.Error("could not get container",
- zap.Stringer("cid", idCnr),
- zap.String("error", err.Error()),
- )
- if container.IsErrNotFound(err) {
- var prm engine.InhumePrm
- prm.MarkAsGarbage(addrWithType.Address)
- prm.WithForceRemoval()
-
- _, err := p.jobQueue.localStorage.Inhume(prm)
- if err != nil {
- p.log.Error("could not inhume object with missing container",
- zap.Stringer("cid", idCnr),
- zap.Stringer("oid", idObj),
- zap.String("error", err.Error()))
+ if client.IsErrContainerNotFound(err) {
+ existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container())
+ if errWasRemoved != nil {
+ return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved)
+ } else if existed {
+ err := p.buryFn(ctx, objInfo.Address)
+ if err != nil {
+ return fmt.Errorf("%s: %w", logs.PolicerCouldNotInhumeObjectWithMissingContainer, err)
+ }
}
}
- return
+ return fmt.Errorf("%s: %w", logs.PolicerCouldNotGetContainer, err)
}
policy := cnr.Value.PlacementPolicy()
- nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
+ if policycore.IsECPlacement(policy) {
+ return p.processECContainerObject(ctx, objInfo, cnr.Value)
+ }
+ return p.processRepContainerObject(ctx, objInfo, policy)
+}
+
+func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
+ idObj := objInfo.Address.Object()
+ idCnr := objInfo.Address.Container()
+ nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy)
if err != nil {
- p.log.Error("could not build placement vector for object",
- zap.Stringer("cid", idCnr),
- zap.String("error", err.Error()),
- )
-
- return
+ return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
- c := &processPlacementContext{
- Context: ctx,
- }
-
- var numOfContainerNodes int
- for i := range nn {
- numOfContainerNodes += len(nn[i])
- }
+ c := &placementRequirements{}
// cached info about already checked nodes
checkedNodes := newNodeCache()
@@ -120,25 +69,33 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
for i := range nn {
select {
case <-ctx.Done():
- return
+ return ctx.Err()
default:
}
- p.processNodes(c, addrWithType, nn[i], policy.ReplicaNumberByIndex(i), checkedNodes)
+ shortage := policy.ReplicaDescriptor(i).NumberOfObjects()
+ if objInfo.Type == objectSDK.TypeLock || objInfo.Type == objectSDK.TypeTombstone || objInfo.IsLinkingObject {
+ // all nodes of a container must store the `LOCK`, `TOMBSTONE` and linking objects
+ // for correct object removal protection:
+ // - `LOCK`, `TOMBSTONE` and linking objects are broadcast on their PUT requests;
+ // - `LOCK` object removal is a prohibited action in the GC.
+ shortage = uint32(len(nn[i]))
+ }
+
+ p.processRepNodes(ctx, c, objInfo, nn[i], shortage, checkedNodes)
}
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info("redundant local object copy detected",
- zap.Stringer("object", addr),
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
+ zap.Stringer("object", objInfo.Address),
)
- p.cbRedundantCopy(addr)
+ p.cbRedundantCopy(ctx, objInfo.Address)
}
+ return nil
}
-type processPlacementContext struct {
- context.Context
-
+type placementRequirements struct {
// needLocalCopy is true if the current node must store an object according to the storage policy.
needLocalCopy bool
// removeLocalCopy is true if all copies are stored according to the storage policy
@@ -146,38 +103,14 @@ type processPlacementContext struct {
removeLocalCopy bool
}
-func (p *Policer) processNodes(ctx *processPlacementContext, addrWithType objectcore.AddressWithType,
- nodes []netmap.NodeInfo, shortage uint32, checkedNodes *nodeCache) {
- addr := addrWithType.Address
- typ := addrWithType.Type
- prm := new(headsvc.RemoteHeadPrm).WithObjectAddress(addr)
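+// processRepNodes checks object replicas on the given placement vector:
+// local, maintenance and object-holding nodes reduce the shortage, while
+// nodes without the object become replication candidates.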
+func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRequirements, objInfo objectcore.Info,
+ nodes []netmap.NodeInfo, shortage uint32, checkedNodes nodeCache,
+) {
+ addr := objInfo.Address
// Number of copies that are stored on maintenance nodes.
var uncheckedCopies int
-
- handleMaintenance := func(node netmap.NodeInfo) {
- // consider remote nodes under maintenance as problem OK. Such
- // nodes MAY not respond with object, however, this is how we
- // prevent spam with new replicas.
- // However, additional copies should not be removed in this case,
- // because we can remove the only copy this way.
- checkedNodes.submitReplicaHolder(node)
- shortage--
- uncheckedCopies++
-
- p.log.Debug("consider node under maintenance as OK",
- zap.String("node", netmap.StringifyPublicKey(node)),
- )
- }
-
- if typ == object.TypeLock {
- // all nodes of a container must store the `LOCK` objects
- // for correct object removal protection:
- // - `LOCK` objects are broadcast on their PUT requests;
- // - `LOCK` object removal is a prohibited action in the GC.
- shortage = uint32(len(nodes))
- }
-
+ var candidates []netmap.NodeInfo
for i := 0; shortage > 0 && i < len(nodes); i++ {
select {
case <-ctx.Done():
@@ -185,96 +118,96 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addrWithType object
default:
}
- if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
- ctx.needLocalCopy = true
-
- shortage--
- } else if nodes[i].IsMaintenance() {
- handleMaintenance(nodes[i])
- } else {
- if status := checkedNodes.processStatus(nodes[i]); status >= 0 {
- if status == 0 {
- // node already contains replica, no need to replicate
- nodes = append(nodes[:i], nodes[i+1:]...)
- i--
- shortage--
- }
-
+ var err error
+ st := checkedNodes.processStatus(nodes[i])
+ if !st.Processed() {
+ st, err = p.checkStatus(ctx, addr, nodes[i])
+ checkedNodes.set(nodes[i], st)
+ if st == nodeDoesNotHoldObject {
+ // 1. This is the first time the node is encountered (`!st.Processed()`).
+ // 2. The node does not hold object (`st == nodeDoesNotHoldObject`).
+ // So we need to try to put an object to it.
+ candidates = append(candidates, nodes[i])
continue
}
-
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
-
- _, err := p.remoteHeader.Head(callCtx, prm.WithNodeInfo(nodes[i]))
-
- cancel()
-
- if client.IsErrObjectNotFound(err) {
- checkedNodes.submitReplicaCandidate(nodes[i])
- continue
- }
-
- if isClientErrMaintenance(err) {
- handleMaintenance(nodes[i])
- } else if err != nil {
- p.log.Error("receive object header to check policy compliance",
- zap.Stringer("object", addr),
- zap.String("error", err.Error()),
- )
- } else {
- shortage--
- checkedNodes.submitReplicaHolder(nodes[i])
- }
}
- nodes = append(nodes[:i], nodes[i+1:]...)
- i--
+ switch st {
+ case nodeIsLocal:
+ requirements.needLocalCopy = true
+
+ shortage--
+ case nodeIsUnderMaintenance:
+ shortage--
+ uncheckedCopies++
+
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+ zap.String("node", netmap.StringifyPublicKey(nodes[i])))
+ case nodeHoldsObject:
+ shortage--
+ case nodeDoesNotHoldObject:
+ case nodeStatusUnknown:
+ p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
+ zap.Stringer("object", addr),
+ zap.Error(err))
+ default:
+ panic("unreachable")
+ }
}
- if shortage > 0 {
- p.log.Debug("shortage of object copies detected",
+ p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies)
+}
+
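+// checkStatus classifies a single placement node: local, under maintenance,
+// holding the object, not holding it, or unknown if the HEAD request failed
+// with an unexpected error.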
+func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) {
+ if p.netmapKeys.IsLocalKey(node.PublicKey()) {
+ return nodeIsLocal, nil
+ }
+ if node.Status().IsMaintenance() {
+ return nodeIsUnderMaintenance, nil
+ }
+
+ callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
+ _, err := p.remoteHeader(callCtx, node, addr, false)
+ cancel()
+
+ if err == nil {
+ return nodeHoldsObject, nil
+ }
+ if client.IsErrObjectNotFound(err) {
+ return nodeDoesNotHoldObject, nil
+ }
+ if client.IsErrNodeUnderMaintenance(err) {
+ return nodeIsUnderMaintenance, nil
+ }
+ return nodeStatusUnknown, err
+}
+
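+// handleProcessNodesResult schedules replication to the candidate nodes if
+// a shortage remains, keeps the local copy if some replicas are only known
+// to maintenance nodes, and marks the local copy redundant otherwise.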
+func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
+ nodes []netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int,
+) {
+ switch {
+ case shortage > 0:
+ p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected,
zap.Stringer("object", addr),
zap.Uint32("shortage", shortage),
)
- var task replicator.Task
- task.SetObjectAddress(addr)
- task.SetNodes(nodes)
- task.SetCopiesNumber(shortage)
+ task := replicator.Task{
+ NumCopies: shortage,
+ Addr: addr,
+ Nodes: nodes,
+ }
- p.replicator.HandleTask(ctx, task, checkedNodes)
- } else if uncheckedCopies > 0 {
+ p.replicator.HandleReplicationTask(ctx, task, checkedNodes)
+
+ case uncheckedCopies > 0:
// If we have more copies than needed, but some of them are from the maintenance nodes,
// save the local copy.
- p.log.Debug("some of the copies are stored on nodes under maintenance, save local copy",
+ p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
zap.Int("count", uncheckedCopies))
- } else if uncheckedCopies == 0 {
+
+ case uncheckedCopies == 0:
// Safe to remove: checked all copies, shortage == 0.
- ctx.removeLocalCopy = true
+ requirements.removeLocalCopy = true
}
}
-
-// isClientErrMaintenance checks if err corresponds to FrostFS status return
-// which tells that node is currently under maintenance. Supports wrapped
-// errors.
-//
-// Similar to client.IsErr___ errors, consider replacing to FrostFS SDK.
-func isClientErrMaintenance(err error) bool {
- switch unwrapErr(err).(type) {
- default:
- return false
- case
- apistatus.NodeUnderMaintenance,
- *apistatus.NodeUnderMaintenance:
- return true
- }
-}
-
-// unwrapErr unwraps error using errors.Unwrap.
-func unwrapErr(err error) error {
- for e := errors.Unwrap(err); e != nil; e = errors.Unwrap(err) {
- err = e
- }
-
- return err
-}
diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go
index b40ee90d2b..69879c439f 100644
--- a/pkg/services/policer/check_test.go
+++ b/pkg/services/policer/check_test.go
@@ -11,14 +11,14 @@ func TestNodeCache(t *testing.T) {
cache := newNodeCache()
node := netmaptest.NodeInfo()
- require.Negative(t, cache.processStatus(node))
+ require.Equal(t, nodeNotProcessed, cache.processStatus(node))
cache.SubmitSuccessfulReplication(node)
- require.Zero(t, cache.processStatus(node))
+ require.Equal(t, nodeHoldsObject, cache.processStatus(node))
- cache.submitReplicaCandidate(node)
- require.Positive(t, cache.processStatus(node))
+ cache.set(node, nodeDoesNotHoldObject)
+ require.Equal(t, nodeDoesNotHoldObject, cache.processStatus(node))
- cache.submitReplicaHolder(node)
- require.Zero(t, cache.processStatus(node))
+ cache.set(node, nodeHoldsObject)
+ require.Equal(t, nodeHoldsObject, cache.processStatus(node))
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
new file mode 100644
index 0000000000..fbdeb31485
--- /dev/null
+++ b/pkg/services/policer/ec.go
@@ -0,0 +1,395 @@
+package policer
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+var errNoECInfoReturned = errors.New("no EC info returned")
+
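+// ecChunkProcessResult reports the placement check of a single EC chunk:
+// validPlacement means the chunk is stored on its required node,
+// removeLocal means the local copy of the chunk may be dropped.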
+type ecChunkProcessResult struct {
+ validPlacement bool
+ removeLocal bool
+}
+
+var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node")
+
+func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
+ if objInfo.ECInfo == nil {
+ return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy())
+ }
+ return p.processECContainerECObject(ctx, objInfo, cnr)
+}
+
+// processECContainerRepObject processes non-erasure-coded objects in an EC container: tombstones, locks and linking objects.
+// All of them must be stored on all container nodes.
+func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
+ objID := objInfo.Address.Object()
+ nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy)
+ if err != nil {
+ return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
+ }
+ if len(nn) != 1 || len(nn[0]) == 0 {
+ return errInvalidECPlacement
+ }
+
+ c := &placementRequirements{}
+ checkedNodes := newNodeCache()
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)
+
+ if !c.needLocalCopy && c.removeLocalCopy {
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
+ zap.Stringer("object", objInfo.Address),
+ )
+
+ p.cbRedundantCopy(ctx, objInfo.Address)
+ }
+ return nil
+}
+
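+// processECContainerECObject checks the placement of an EC chunk against the
+// placement vector built for its parent object: it replicates or pulls
+// chunks as needed and drops the local chunk only when it is redundant.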
+func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
+ nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
+ if err != nil {
+ return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
+ }
+ if len(nn) != 1 || len(nn[0]) == 0 {
+ return errInvalidECPlacement
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ res := p.processECChunk(ctx, objInfo, nn[0])
+ if !res.validPlacement {
+ // drop local chunk only if all required chunks are in place
+ res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr)
+ }
+ p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
+
+ if res.removeLocal {
+ p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
+ p.cbRedundantCopy(ctx, objInfo.Address)
+ }
+ return nil
+}
+
+// processECChunk replicates EC chunk if needed.
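+// The required node for a chunk is nodes[Index%len(nodes)]: e.g. for an
+// "EC 2.1" policy (three chunks) over four placement nodes, the chunk with
+// Index 2 is expected on nodes[2].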
+func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
+ var removeLocalChunk bool
+ requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
+ if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+ // current node is required node, we are happy
+ return ecChunkProcessResult{
+ validPlacement: true,
+ }
+ }
+ if requiredNode.Status().IsMaintenance() {
+ // consider a node under maintenance as holding the object, but do not drop the local copy
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ return ecChunkProcessResult{}
+ }
+
+ callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
+ _, err := p.remoteHeader(callCtx, requiredNode, objInfo.Address, false)
+ cancel()
+
+ if err == nil {
+ removeLocalChunk = true
+ } else if client.IsErrObjectNotFound(err) {
+ p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
+ task := replicator.Task{
+ NumCopies: 1,
+ Addr: objInfo.Address,
+ Nodes: []netmap.NodeInfo{requiredNode},
+ }
+ p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
+ } else if client.IsErrNodeUnderMaintenance(err) {
+ // consider a node under maintenance as holding the object, but do not drop the local copy
+ p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+ } else {
+ p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
+ }
+
+ return ecChunkProcessResult{
+ removeLocal: removeLocalChunk,
+ }
+}
+
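+// pullRequiredECChunks returns true if all EC chunks required on the local
+// node are already stored locally; otherwise it schedules pull tasks for
+// the missing chunks and returns false.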
+func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
+ var parentAddress oid.Address
+ parentAddress.SetContainer(objInfo.Address.Container())
+ parentAddress.SetObject(objInfo.ECInfo.ParentID)
+
+ requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
+ if len(requiredChunkIndexes) == 0 {
+ p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
+ return true
+ }
+
+ err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
+ if err != nil {
+ p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
+ return false
+ }
+ if len(requiredChunkIndexes) == 0 {
+ return true
+ }
+
+ indexToObjectID := make(map[uint32]oid.ID)
+ success := p.resolveRemoteECChunks(ctx, parentAddress, nodes, requiredChunkIndexes, indexToObjectID)
+ if !success {
+ return false
+ }
+
+ for index, candidates := range requiredChunkIndexes {
+ var addr oid.Address
+ addr.SetContainer(objInfo.Address.Container())
+ addr.SetObject(indexToObjectID[index])
+ p.replicator.HandlePullTask(ctx, replicator.Task{
+ Addr: addr,
+ Nodes: candidates,
+ Container: cnr,
+ })
+ }
+ // there were missing chunks, so the local chunk cannot be dropped yet
+ return false
+}
+
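+// collectRequiredECChunks returns the indexes of chunks the local node must
+// hold according to the placement vector, mapped to (initially empty) lists
+// of candidate nodes to pull them from.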
+func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objectcore.Info) map[uint32][]netmap.NodeInfo {
+ requiredChunkIndexes := make(map[uint32][]netmap.NodeInfo)
+ for i, n := range nodes {
+ if uint32(i) == objInfo.ECInfo.Total {
+ break
+ }
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
+ }
+ }
+ return requiredChunkIndexes
+}
+
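+// resolveLocalECChunks removes from required the indexes of chunks that are
+// already stored locally, based on the EC info returned by the local HEAD
+// request for the parent object.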
+func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Address, required map[uint32][]netmap.NodeInfo) error {
+ _, err := p.localHeader(ctx, parentAddress)
+ var eiErr *objectSDK.ECInfoError
+ if err == nil { // should not happen
+ return errNoECInfoReturned
+ }
+ if !errors.As(err, &eiErr) {
+ return err
+ }
+ for _, ch := range eiErr.ECInfo().Chunks {
+ delete(required, ch.Index)
+ }
+ return nil
+}
+
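+// resolveRemoteECChunks queries the remaining placement nodes for the parent
+// object, fills candidate nodes and object IDs for each required chunk, and
+// returns false on inconsistent chunk IDs or chunks with no holders.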
+func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
+ var eiErr *objectSDK.ECInfoError
+ for _, n := range nodes {
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ continue
+ }
+ _, err := p.remoteHeader(ctx, n, parentAddress, true)
+ if !errors.As(err, &eiErr) {
+ continue
+ }
+ for _, ch := range eiErr.ECInfo().Chunks {
+ if candidates, ok := required[ch.Index]; ok {
+ candidates = append(candidates, n)
+ required[ch.Index] = candidates
+
+ var chunkID oid.ID
+ if err := chunkID.ReadFromV2(ch.ID); err != nil {
+ p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ return false
+ }
+ if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
+ p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
+ zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
+ return false
+ }
+ indexToObjectID[ch.Index] = chunkID
+ }
+ }
+ }
+
+ for index, candidates := range required {
+ if len(candidates) == 0 {
+ p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
+ return false
+ }
+ }
+
+ return true
+}
+
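+// adjustECPlacement inspects which nodes hold which chunks of the parent
+// object, re-replicates the local chunk to nodes that failed to respond,
+// and restores missing chunks when enough of them remain.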
+func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
+ var parentAddress oid.Address
+ parentAddress.SetContainer(objInfo.Address.Container())
+ parentAddress.SetObject(objInfo.ECInfo.ParentID)
+ var eiErr *objectSDK.ECInfoError
+ resolved := make(map[uint32][]netmap.NodeInfo)
+ chunkIDs := make(map[uint32]oid.ID)
+ restore := true // do not restore EC chunks if some node returned error
+ for idx, n := range nodes {
+ if uint32(idx) >= objInfo.ECInfo.Total && uint32(len(resolved)) == objInfo.ECInfo.Total {
+ return
+ }
+ var err error
+ if p.netmapKeys.IsLocalKey(n.PublicKey()) {
+ _, err = p.localHeader(ctx, parentAddress)
+ } else {
+ _, err = p.remoteHeader(ctx, n, parentAddress, true)
+ }
+
+ if errors.As(err, &eiErr) {
+ for _, ch := range eiErr.ECInfo().Chunks {
+ resolved[ch.Index] = append(resolved[ch.Index], n)
+ var ecInfoChunkID oid.ID
+ if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
+ p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+ return
+ }
+ if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
+ p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
+ zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
+ return
+ }
+ chunkIDs[ch.Index] = ecInfoChunkID
+ }
+ } else if client.IsErrObjectAlreadyRemoved(err) {
+ restore = false
+ } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+ p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
+ p.replicator.HandleReplicationTask(ctx, replicator.Task{
+ NumCopies: 1,
+ Addr: objInfo.Address,
+ Nodes: []netmap.NodeInfo{n},
+ }, newNodeCache())
+ restore = false
+ }
+ }
+ if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total {
+ return
+ }
+ if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() {
+ var found []uint32
+ for i := range resolved {
+ found = append(found, i)
+ }
+ p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
+ return
+ }
+ p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
+}
+
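+// restoreECObject reconstructs the missing EC chunks from the existing ones
+// using the container's data/parity configuration and stores each restored
+// chunk on its target placement node.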
+func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID,
+ cnr containerSDK.Container,
+) {
+ c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
+ if err != nil {
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ return
+ }
+ parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
+ if parts == nil {
+ return
+ }
+ key, err := p.keyStorage.GetKey(nil)
+ if err != nil {
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ return
+ }
+ required := make([]bool, len(parts))
+ for i, p := range parts {
+ if p == nil {
+ required[i] = true
+ }
+ }
+ if err := c.ReconstructParts(parts, required, key); err != nil {
+ p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+ return
+ }
+ for idx, part := range parts {
+ if _, exists := existedChunks[uint32(idx)]; exists {
+ continue
+ }
+ var addr oid.Address
+ addr.SetContainer(parentAddress.Container())
+ pID, _ := part.ID()
+ addr.SetObject(pID)
+ targetNode := nodes[idx%len(nodes)]
+ if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+ p.replicator.HandleLocalPutTask(ctx, replicator.Task{
+ Addr: addr,
+ Obj: part,
+ Container: cnr,
+ })
+ } else {
+ p.replicator.HandleReplicationTask(ctx, replicator.Task{
+ NumCopies: 1,
+ Addr: addr,
+ Nodes: []netmap.NodeInfo{targetNode},
+ Obj: part,
+ }, newNodeCache())
+ }
+ }
+}
+
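+// collectExistedChunks concurrently fetches every known chunk from one of
+// its holder nodes and returns them as a slice indexed by chunk index, or
+// nil if fetching failed.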
+func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.Info, existedChunks map[uint32][]netmap.NodeInfo, parentAddress oid.Address, chunkIDs map[uint32]oid.ID) []*objectSDK.Object {
+ parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
+ errGroup, egCtx := errgroup.WithContext(ctx)
+ for idx, nodes := range existedChunks {
+ errGroup.Go(func() error {
+ var objID oid.Address
+ objID.SetContainer(parentAddress.Container())
+ objID.SetObject(chunkIDs[idx])
+ var obj *objectSDK.Object
+ var err error
+ for _, node := range nodes {
+ if p.netmapKeys.IsLocalKey(node.PublicKey()) {
+ obj, err = p.localObject(egCtx, objID)
+ } else {
+ obj, err = p.remoteObject(egCtx, node, objID)
+ }
+ if err == nil {
+ break
+ }
+ p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
+ }
+ if obj != nil {
+ parts[idx] = obj
+ }
+ return nil
+ })
+ }
+ if err := errGroup.Wait(); err != nil {
+ p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
+ return nil
+ }
+ return parts
+}
diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go
new file mode 100644
index 0000000000..c6980536b7
--- /dev/null
+++ b/pkg/services/policer/ec_test.go
@@ -0,0 +1,710 @@
+package policer
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "sync/atomic"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+func TestECChunkHasValidPlacement(t *testing.T) {
+ t.Parallel()
+ chunkAddress := oidtest.Address()
+ parentID := oidtest.ID()
+
+ var policy netmapSDK.PlacementPolicy
+ require.NoError(t, policy.DecodeString("EC 2.1"))
+
+ cnr := &container.Container{}
+ cnr.Value.Init()
+ cnr.Value.SetPlacementPolicy(policy)
+ containerSrc := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ if id.Equals(chunkAddress.Container()) {
+ return cnr, nil
+ }
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ }
+
+ nodes := make([]netmapSDK.NodeInfo, 4)
+ for i := range nodes {
+ nodes[i].SetPublicKey([]byte{byte(i)})
+ }
+
+ placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ if cnr.Equals(chunkAddress.Container()) && obj.Equals(parentID) {
+ return [][]netmapSDK.NodeInfo{nodes}, nil
+ }
+ return nil, errors.New("unexpected placement build")
+ }
+
+ remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ require.True(t, raw, "remote header for parent object must be called with raw flag")
+ index := int(ni.PublicKey()[0])
+ require.True(t, index == 1 || index == 2, "invalid node to get parent header")
+ require.True(t, a.Container() == chunkAddress.Container() && a.Object() == parentID, "invalid address to get remote header")
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = uint32(index)
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) {
+ require.True(t, a.Container() == chunkAddress.Container() && a.Object() == parentID, "invalid address to get local header")
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = uint32(0)
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ p := New(
+ WithContainerSource(containerSrc),
+ WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
+ WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
+ return bytes.Equal(k, nodes[0].PublicKey())
+ })),
+ WithRemoteObjectHeaderFunc(remoteHeadFn),
+ WithLocalObjectHeaderFunc(localHeadFn),
+ WithPool(testPool(t)),
+ )
+
+ objInfo := objectcore.Info{
+ Address: chunkAddress,
+ Type: objectSDK.TypeRegular,
+ ECInfo: &objectcore.ECInfo{
+ ParentID: parentID,
+ Index: 0,
+ Total: 3,
+ },
+ }
+ err := p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+}
+
+func TestECChunkHasInvalidPlacement(t *testing.T) {
+ t.Parallel()
+ chunkAddress := oidtest.Address()
+ parentID := oidtest.ID()
+ chunkObject := objectSDK.New()
+ chunkObject.SetContainerID(chunkAddress.Container())
+ chunkObject.SetID(chunkAddress.Object())
+ chunkObject.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObject.SetPayloadSize(uint64(10))
+ chunkObject.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: parentID}, 0, 3, []byte{}, 0))
+
+ var policy netmapSDK.PlacementPolicy
+ require.NoError(t, policy.DecodeString("EC 2.1"))
+
+ cnr := &container.Container{}
+ cnr.Value.Init()
+ cnr.Value.SetPlacementPolicy(policy)
+ containerSrc := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ if id.Equals(chunkAddress.Container()) {
+ return cnr, nil
+ }
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ }
+
+ nodes := make([]netmapSDK.NodeInfo, 4)
+ for i := range nodes {
+ nodes[i].SetPublicKey([]byte{byte(i)})
+ }
+
+ placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ if cnr.Equals(chunkAddress.Container()) && obj.Equals(parentID) {
+ return [][]netmapSDK.NodeInfo{nodes}, nil
+ }
+ return nil, errors.New("unexpected placement build")
+ }
+
+ objInfo := objectcore.Info{
+ Address: chunkAddress,
+ Type: objectSDK.TypeRegular,
+ ECInfo: &objectcore.ECInfo{
+ ParentID: parentID,
+ Index: 1,
+ Total: 3,
+ },
+ }
+
+ t.Run("node0 has chunk1, node1 has chunk0 and chunk1", func(t *testing.T) {
+ // policer should pull chunk0 on first run and drop chunk1 on second run
+ var allowDrop bool
+ requiredChunkID := oidtest.ID()
+ headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw {
+ return chunkObject, nil
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = 1
+ ch.Total = 3
+ ei.AddChunk(ch)
+ ch.Index = 0
+ ch.SetID(requiredChunkID)
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = 2
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ return nil, new(apistatus.ObjectNotFound)
+ }
+ require.Fail(t, "unexpected remote HEAD")
+ return nil, fmt.Errorf("unexpected remote HEAD")
+ }
+
+ localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD")
+ if allowDrop {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = 1
+ ch.Total = 3
+ ei.AddChunk(ch)
+ ch.SetID(requiredChunkID)
+ ch.Index = 0
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = 1
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ var pullCounter atomic.Int64
+ var dropped []oid.Address
+ p := New(
+ WithContainerSource(containerSrc),
+ WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
+ WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
+ return bytes.Equal(k, nodes[0].PublicKey())
+ })),
+ WithRemoteObjectHeaderFunc(headFn),
+ WithLocalObjectHeaderFunc(localHeadF),
+ WithReplicator(&testReplicator{
+ handlePullTask: func(ctx context.Context, r replicator.Task) {
+ require.True(t, r.Addr.Container() == chunkAddress.Container() && r.Addr.Object() == requiredChunkID &&
+ len(r.Nodes) == 1 && bytes.Equal(r.Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid pull task")
+ pullCounter.Add(1)
+ },
+ }),
+ WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) {
+ require.True(t, allowDrop, "invalid redundant copy call")
+ dropped = append(dropped, a)
+ }),
+ WithPool(testPool(t)),
+ )
+
+ err := p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+ require.Equal(t, int64(1), pullCounter.Load(), "invalid pull count")
+ require.Equal(t, 0, len(dropped), "invalid dropped count")
+ allowDrop = true
+ err = p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+ require.Equal(t, int64(1), pullCounter.Load(), "invalid pull count")
+ require.Equal(t, 1, len(dropped), "invalid dropped count")
+ require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object")
+ })
+
+ t.Run("node0 has chunk0 and chunk1, node1 has chunk1", func(t *testing.T) {
+ // policer should drop chunk1
+ headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw {
+ return chunkObject, nil
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkAddress.Object())
+ ch.Index = 1
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = 2
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ return nil, new(apistatus.ObjectNotFound)
+ }
+ require.Fail(t, "unexpected remote HEAD")
+ return nil, fmt.Errorf("unexpected remote HEAD")
+ }
+
+ localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD")
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkAddress.Object())
+ ch.Index = 1
+ ch.Total = 3
+ ei.AddChunk(ch)
+ ch.SetID(oidtest.ID())
+ ch.Index = 0
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ var dropped []oid.Address
+ p := New(
+ WithContainerSource(containerSrc),
+ WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
+ WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
+ return bytes.Equal(k, nodes[0].PublicKey())
+ })),
+ WithRemoteObjectHeaderFunc(headFn),
+ WithLocalObjectHeaderFunc(localHeadF),
+ WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) {
+ dropped = append(dropped, a)
+ }),
+ WithPool(testPool(t)),
+ )
+
+ err := p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(dropped), "invalid dropped count")
+ require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object")
+ })
+
+ t.Run("node0 has chunk0 and chunk1, node1 has no chunks", func(t *testing.T) {
+ // policer should replicate chunk1 to node1 on first run and drop chunk1 on node0 on second run
+ var secondRun bool
+ headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw {
+ if !secondRun {
+ return nil, new(apistatus.ObjectNotFound)
+ }
+ return chunkObject, nil
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkAddress.Object())
+ ch.Index = 1
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(oidtest.ID())
+ ch.Index = 2
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() &&
+ a.Object() == parentID && raw {
+ return nil, new(apistatus.ObjectNotFound)
+ }
+ require.Fail(t, "unexpected remote HEAD")
+ return nil, fmt.Errorf("unexpected remote HEAD")
+ }
+
+ localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD")
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkAddress.Object())
+ ch.Index = 1
+ ch.Total = 3
+ ei.AddChunk(ch)
+ ch.SetID(oidtest.ID())
+ ch.Index = 0
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ var dropped []oid.Address
+ var replicated []replicator.Task
+ p := New(
+ WithContainerSource(containerSrc),
+ WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
+ WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
+ return bytes.Equal(k, nodes[0].PublicKey())
+ })),
+ WithRemoteObjectHeaderFunc(headFn),
+ WithLocalObjectHeaderFunc(localHeadF),
+ WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) {
+ dropped = append(dropped, a)
+ }),
+ WithReplicator(&testReplicator{
+ handleReplicationTask: func(ctx context.Context, t replicator.Task, tr replicator.TaskResult) {
+ replicated = append(replicated, t)
+ },
+ }),
+ WithPool(testPool(t)),
+ )
+
+ err := p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+ require.Equal(t, 0, len(dropped), "invalid dropped count")
+ require.Equal(t, 1, len(replicated), "invalid replicated count")
+ require.Equal(t, chunkAddress, replicated[0].Addr, "invalid replicated object")
+ require.True(t, bytes.Equal(replicated[0].Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid replicate target")
+
+ secondRun = true
+ err = p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(replicated), "invalid replicated count")
+ require.Equal(t, chunkAddress, replicated[0].Addr, "invalid replicated object")
+ require.True(t, bytes.Equal(replicated[0].Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid replicate target")
+ require.Equal(t, 1, len(dropped), "invalid dropped count")
+ require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object")
+ })
+}
+
+func TestECChunkRestore(t *testing.T) {
+ // node0 has chunk0, node1 has chunk1
+ // policer should replicate chunk0 to node2 on the first run
+ // then restore EC object and replicate chunk2 to node2 on the second run
+ t.Parallel()
+
+ payload := make([]byte, 64)
+ rand.Read(payload)
+ parentAddress := oidtest.Address()
+ parentObject := objectSDK.New()
+ parentObject.SetContainerID(parentAddress.Container())
+ parentObject.SetPayload(payload)
+ parentObject.SetPayloadSize(64)
+ objectSDK.CalculateAndSetPayloadChecksum(parentObject)
+ err := objectSDK.CalculateAndSetID(parentObject)
+ require.NoError(t, err)
+ id, _ := parentObject.ID()
+ parentAddress.SetObject(id)
+
+ chunkIDs := make([]oid.ID, 3)
+ c, err := erasurecode.NewConstructor(2, 1)
+ require.NoError(t, err)
+ key, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ chunks, err := c.Split(parentObject, &key.PrivateKey)
+ require.NoError(t, err)
+ for i, ch := range chunks {
+ chunkIDs[i], _ = ch.ID()
+ }
+
+ var policy netmapSDK.PlacementPolicy
+ require.NoError(t, policy.DecodeString("EC 2.1"))
+
+ cnr := &container.Container{}
+ cnr.Value.Init()
+ cnr.Value.SetPlacementPolicy(policy)
+ containerSrc := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ if id.Equals(parentAddress.Container()) {
+ return cnr, nil
+ }
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ }
+
+ nodes := make([]netmapSDK.NodeInfo, 4)
+ for i := range nodes {
+ nodes[i].SetPublicKey([]byte{byte(i)})
+ }
+
+ placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ if cnr.Equals(parentAddress.Container()) && obj.Equals(parentAddress.Object()) {
+ return [][]netmapSDK.NodeInfo{nodes}, nil
+ }
+ return nil, errors.New("unexpected placement build")
+ }
+ var secondRun bool
+ remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ require.True(t, raw, "remote header for parent object must be called with raw flag")
+ index := int(ni.PublicKey()[0])
+ require.True(t, index == 1 || index == 2 || index == 3, "invalid node to get parent header")
+ require.True(t, a == parentAddress, "invalid address to get remote header")
+ if index == 1 {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkIDs[1])
+ ch.Index = uint32(1)
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if index == 2 && secondRun {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkIDs[0])
+ ch.Index = uint32(0)
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ return nil, new(apistatus.ObjectNotFound)
+ }
+
+ localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) {
+ require.True(t, a == parentAddress, "invalid address to get local header")
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkIDs[0])
+ ch.Index = uint32(0)
+ ch.Total = 3
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ var replicatedObj []*objectSDK.Object
+ p := New(
+ WithContainerSource(containerSrc),
+ WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
+ WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
+ return bytes.Equal(k, nodes[0].PublicKey())
+ })),
+ WithRemoteObjectHeaderFunc(remoteHeadFn),
+ WithLocalObjectHeaderFunc(localHeadFn),
+ WithReplicator(&testReplicator{
+ handleReplicationTask: func(ctx context.Context, t replicator.Task, tr replicator.TaskResult) {
+ if t.Obj != nil {
+ replicatedObj = append(replicatedObj, t.Obj)
+ }
+ },
+ }),
+ WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
+ require.True(t, a.Container() == parentAddress.Container() && a.Object() == chunkIDs[0], "invalid local object request")
+ return chunks[0], nil
+ }),
+ WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) {
+ index := ni.PublicKey()[0]
+ if index == 2 {
+ return nil, new(apistatus.ObjectNotFound)
+ }
+ return chunks[index], nil
+ }),
+ WithPool(testPool(t)),
+ WithKeyStorage(util.NewKeyStorage(&key.PrivateKey, nil, nil)),
+ )
+
+ var chunkAddress oid.Address
+ chunkAddress.SetContainer(parentAddress.Container())
+ chunkAddress.SetObject(chunkIDs[0])
+ objInfo := objectcore.Info{
+ Address: chunkAddress,
+ Type: objectSDK.TypeRegular,
+ ECInfo: &objectcore.ECInfo{
+ ParentID: parentAddress.Object(),
+ Index: 0,
+ Total: 3,
+ },
+ }
+ err = p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+ secondRun = true
+ err = p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(replicatedObj), "invalid replicated objects count")
+ chunks[2].SetSignature(nil)
+ expectedData, err := chunks[2].MarshalJSON()
+ require.NoError(t, err)
+ replicatedObj[0].SetSignature(nil)
+ actualData, err := replicatedObj[0].MarshalJSON()
+ require.NoError(t, err)
+ require.EqualValues(t, string(expectedData), string(actualData), "invalid restored objects")
+}
+
+func TestECChunkRestoreNodeOff(t *testing.T) {
+ // node0 has chunk0, node1 has chunk1, node2 has chunk2, node3 is out of netmap
+ t.Parallel()
+
+ payload := make([]byte, 64)
+ rand.Read(payload)
+ parentAddress := oidtest.Address()
+ parentObject := objectSDK.New()
+ parentObject.SetContainerID(parentAddress.Container())
+ parentObject.SetPayload(payload)
+ parentObject.SetPayloadSize(64)
+ objectSDK.CalculateAndSetPayloadChecksum(parentObject)
+ err := objectSDK.CalculateAndSetID(parentObject)
+ require.NoError(t, err)
+ id, _ := parentObject.ID()
+ parentAddress.SetObject(id)
+
+ chunkIDs := make([]oid.ID, 4)
+ c, err := erasurecode.NewConstructor(3, 1)
+ require.NoError(t, err)
+ key, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ chunks, err := c.Split(parentObject, &key.PrivateKey)
+ require.NoError(t, err)
+ for i, ch := range chunks {
+ chunkIDs[i], _ = ch.ID()
+ }
+
+ var policy netmapSDK.PlacementPolicy
+ require.NoError(t, policy.DecodeString("EC 3.1"))
+
+ cnr := &container.Container{}
+ cnr.Value.Init()
+ cnr.Value.SetPlacementPolicy(policy)
+ containerSrc := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ if id.Equals(parentAddress.Container()) {
+ return cnr, nil
+ }
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ }
+
+ nodes := make([]netmapSDK.NodeInfo, 3)
+ for i := range nodes {
+ nodes[i].SetPublicKey([]byte{byte(i)})
+ }
+
+ placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ if cnr.Equals(parentAddress.Container()) && obj.Equals(parentAddress.Object()) {
+ return [][]netmapSDK.NodeInfo{nodes}, nil
+ }
+ return nil, errors.New("unexpected placement build")
+ }
+ remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ require.True(t, raw, "remote header for parent object must be called with raw flag")
+ index := int(ni.PublicKey()[0])
+ require.True(t, index == 1 || index == 2, "invalid node to get parent header")
+ require.True(t, a == parentAddress, "invalid address to get remote header")
+ if index == 1 {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkIDs[1])
+ ch.Index = uint32(1)
+ ch.Total = 4
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+ if index == 2 {
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkIDs[2])
+ ch.Index = uint32(2)
+ ch.Total = 4
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ return nil, new(apistatus.ObjectNotFound)
+ }
+
+ localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) {
+ require.True(t, a == parentAddress, "invalid address to get local header")
+ ei := objectSDK.NewECInfo()
+ var ch objectSDK.ECChunk
+ ch.SetID(chunkIDs[0])
+ ch.Index = uint32(0)
+ ch.Total = 4
+ ei.AddChunk(ch)
+ return nil, objectSDK.NewECInfoError(ei)
+ }
+
+ var replicatedObj []*objectSDK.Object
+ p := New(
+ WithContainerSource(containerSrc),
+ WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
+ WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
+ return bytes.Equal(k, nodes[0].PublicKey())
+ })),
+ WithRemoteObjectHeaderFunc(remoteHeadFn),
+ WithLocalObjectHeaderFunc(localHeadFn),
+ WithReplicator(&testReplicator{
+ handleLocalPutTask: func(ctx context.Context, task replicator.Task) {
+ if task.Obj != nil {
+ replicatedObj = append(replicatedObj, task.Obj)
+ }
+ },
+ }),
+ WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
+ require.True(t, a.Container() == parentAddress.Container() && a.Object() == chunkIDs[0], "invalid local object request")
+ return chunks[0], nil
+ }),
+ WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) {
+ index := ni.PublicKey()[0]
+ return chunks[index], nil
+ }),
+ WithPool(testPool(t)),
+ WithKeyStorage(util.NewKeyStorage(&key.PrivateKey, nil, nil)),
+ )
+
+ var chunkAddress oid.Address
+ chunkAddress.SetContainer(parentAddress.Container())
+ chunkAddress.SetObject(chunkIDs[0])
+ objInfo := objectcore.Info{
+ Address: chunkAddress,
+ Type: objectSDK.TypeRegular,
+ ECInfo: &objectcore.ECInfo{
+ ParentID: parentAddress.Object(),
+ Index: 0,
+ Total: 4,
+ },
+ }
+ err = p.processObject(context.Background(), objInfo)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(replicatedObj), "invalid replicated objects count")
+ chunks[3].SetSignature(nil)
+ expectedData, err := chunks[3].MarshalJSON()
+ require.NoError(t, err)
+ replicatedObj[0].SetSignature(nil)
+ actualData, err := replicatedObj[0].MarshalJSON()
+ require.NoError(t, err)
+ require.EqualValues(t, string(expectedData), string(actualData), "invalid restored objects")
+}
diff --git a/pkg/services/policer/metrics.go b/pkg/services/policer/metrics.go
new file mode 100644
index 0000000000..c2ad2b0b53
--- /dev/null
+++ b/pkg/services/policer/metrics.go
@@ -0,0 +1,9 @@
+package policer
+
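+// MetricsRegister defines the metrics collected by the Policer.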
+type MetricsRegister interface {
+ IncProcessedObjects()
+}
+
+type noopMetrics struct{}
+
+func (noopMetrics) IncProcessedObjects() {}
diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go
new file mode 100644
index 0000000000..c2157de5d5
--- /dev/null
+++ b/pkg/services/policer/nodecache.go
@@ -0,0 +1,42 @@
+package policer
+
+import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+
+type nodeProcessStatus int8
+
+const (
+ nodeNotProcessed nodeProcessStatus = iota
+ nodeDoesNotHoldObject
+ nodeHoldsObject
+ nodeStatusUnknown
+ nodeIsUnderMaintenance
+ nodeIsLocal
+)
+
+func (st nodeProcessStatus) Processed() bool {
+ return st != nodeNotProcessed
+}
+
+// nodeCache tracks Policer's check progress.
+type nodeCache map[uint64]nodeProcessStatus
+
+func newNodeCache() nodeCache {
+ return make(map[uint64]nodeProcessStatus)
+}
+
+func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
+ n[node.Hash()] = val
+}
+
+// processStatus returns current processing status of the storage node.
+func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
+ return n[node.Hash()]
+}
+
+// SubmitSuccessfulReplication marks given storage node as a current object
+// replica holder.
+//
+// SubmitSuccessfulReplication implements replicator.TaskResult.
+func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
+ n.set(node, nodeHoldsObject)
+}
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
new file mode 100644
index 0000000000..5d59604c20
--- /dev/null
+++ b/pkg/services/policer/option.go
@@ -0,0 +1,214 @@
+package policer
+
+import (
+ "context"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
+)
+
+// KeySpaceIterator is the interface that allows iterating over the key space
+// of local storage.
+// Note that the underlying implementation might be circular: i.e. it can restart
+// when the end of the key space is reached.
+type KeySpaceIterator interface {
+ Next(context.Context, uint32) ([]objectcore.Info, error)
+ Rewind()
+}
+
+// RedundantCopyCallback is a callback to pass
+// the redundant local copy of the object.
+type RedundantCopyCallback func(context.Context, oid.Address)
+
+// BuryFunc is the function to bury (i.e. inhume) an object.
+type BuryFunc func(context.Context, oid.Address) error
+
+// Replicator is the interface to a consumer of replication tasks.
+type Replicator interface {
+ HandleReplicationTask(ctx context.Context, task replicator.Task, res replicator.TaskResult)
+ HandlePullTask(ctx context.Context, task replicator.Task)
+ HandleLocalPutTask(ctx context.Context, task replicator.Task)
+}
+
+// RemoteObjectHeaderFunc is the function to obtain HEAD info from a specific remote node.
+type RemoteObjectHeaderFunc func(context.Context, netmapSDK.NodeInfo, oid.Address, bool) (*objectSDK.Object, error)
+
+// LocalObjectHeaderFunc is the function to obtain HEAD info from the current node.
+type LocalObjectHeaderFunc func(context.Context, oid.Address) (*objectSDK.Object, error)
+
+type RemoteObjectGetFunc func(context.Context, netmapSDK.NodeInfo, oid.Address) (*objectSDK.Object, error)
+
+type LocalObjectGetFunc func(context.Context, oid.Address) (*objectSDK.Object, error)
+
+type cfg struct {
+ headTimeout time.Duration
+
+ log *logger.Logger
+
+ keySpaceIterator KeySpaceIterator
+
+ buryFn BuryFunc
+
+ cnrSrc container.Source
+
+ placementBuilder placement.Builder
+
+ remoteHeader RemoteObjectHeaderFunc
+
+ localHeader LocalObjectHeaderFunc
+
+ netmapKeys netmap.AnnouncedKeys
+
+ replicator Replicator
+
+ cbRedundantCopy RedundantCopyCallback
+
+ taskPool *ants.Pool
+
+ batchSize, cacheSize uint32
+
+ evictDuration, sleepDuration time.Duration
+
+ metrics MetricsRegister
+
+ remoteObject RemoteObjectGetFunc
+
+ localObject LocalObjectGetFunc
+
+ keyStorage *util.KeyStorage
+}
+
+func defaultCfg() *cfg {
+ return &cfg{
+ log: logger.NewLoggerWrapper(zap.L()),
+ batchSize: 10,
+ cacheSize: 1024, // 1024 entries * address size = 1024 * 64 B = 64 KiB
+ sleepDuration: 1 * time.Second,
+ evictDuration: 30 * time.Second,
+ metrics: noopMetrics{},
+ }
+}
+
+// Option is an option for Policer constructor.
+type Option func(*cfg)
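+
+// A minimal construction sketch (illustrative only; the exact option set
+// depends on how the node wires the policer):
+//
+//	pool, _ := ants.NewPool(4)
+//	p := New(
+//		WithPool(pool),
+//		WithKeySpaceIterator(it),
+//		WithContainerSource(cnrSrc),
+//		WithBuryFunc(buryFn),
+//	)
+//	go p.Run(ctx)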
+
+// WithHeadTimeout returns option to set Head timeout of Policer.
+func WithHeadTimeout(v time.Duration) Option {
+ return func(c *cfg) {
+ c.headTimeout = v
+ }
+}
+
+// WithLogger returns option to set Logger of Policer.
+func WithLogger(v *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = v
+ }
+}
+
+func WithKeySpaceIterator(it KeySpaceIterator) Option {
+ return func(c *cfg) {
+ c.keySpaceIterator = it
+ }
+}
+
+func WithBuryFunc(f BuryFunc) Option {
+ return func(c *cfg) {
+ c.buryFn = f
+ }
+}
+
+// WithContainerSource returns option to set container source of Policer.
+func WithContainerSource(v container.Source) Option {
+ return func(c *cfg) {
+ c.cnrSrc = v
+ }
+}
+
+// WithPlacementBuilder returns option to set object placement builder of Policer.
+func WithPlacementBuilder(v placement.Builder) Option {
+ return func(c *cfg) {
+ c.placementBuilder = v
+ }
+}
+
+// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer.
+func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option {
+ return func(c *cfg) {
+ c.remoteHeader = v
+ }
+}
+
+// WithLocalObjectHeaderFunc returns option to set local object header receiver of Policer.
+func WithLocalObjectHeaderFunc(v LocalObjectHeaderFunc) Option {
+ return func(c *cfg) {
+ c.localHeader = v
+ }
+}
+
+func WithRemoteObjectGetFunc(v RemoteObjectGetFunc) Option {
+ return func(c *cfg) {
+ c.remoteObject = v
+ }
+}
+
+func WithLocalObjectGetFunc(v LocalObjectGetFunc) Option {
+ return func(c *cfg) {
+ c.localObject = v
+ }
+}
+
+// WithNetmapKeys returns option to set tool to work with announced public keys.
+func WithNetmapKeys(v netmap.AnnouncedKeys) Option {
+ return func(c *cfg) {
+ c.netmapKeys = v
+ }
+}
+
+// WithReplicator returns option to set object replicator of Policer.
+func WithReplicator(v Replicator) Option {
+ return func(c *cfg) {
+ c.replicator = v
+ }
+}
+
+// WithRedundantCopyCallback returns option to set
+// callback to pass redundant local object copies
+// detected by Policer.
+func WithRedundantCopyCallback(cb RedundantCopyCallback) Option {
+ return func(c *cfg) {
+ c.cbRedundantCopy = cb
+ }
+}
+
+// WithPool returns option to set pool for
+// policy and replication operations.
+func WithPool(p *ants.Pool) Option {
+ return func(c *cfg) {
+ c.taskPool = p
+ }
+}
+
+// WithMetrics returns option to set metrics.
+func WithMetrics(m MetricsRegister) Option {
+ return func(c *cfg) {
+ c.metrics = m
+ }
+}
+
+func WithKeyStorage(ks *util.KeyStorage) Option {
+ return func(c *cfg) {
+ c.keyStorage = ks
+ }
+}
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index dba0c1cba2..c91e7cc7c1 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,51 +1,40 @@
package policer
import (
+ "fmt"
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- headsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/head"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
- "github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
)
-// NodeLoader provides application load statistics.
-type nodeLoader interface {
- // ObjectServiceLoad returns object service load value in [0:1] range.
- ObjectServiceLoad() float64
-}
-
type objectsInWork struct {
- m sync.RWMutex
+ sync.RWMutex
objs map[oid.Address]struct{}
}
func (oiw *objectsInWork) inWork(addr oid.Address) bool {
- oiw.m.RLock()
+ oiw.RLock()
_, ok := oiw.objs[addr]
- oiw.m.RUnlock()
+ oiw.RUnlock()
return ok
}
func (oiw *objectsInWork) remove(addr oid.Address) {
- oiw.m.Lock()
+ oiw.Lock()
delete(oiw.objs, addr)
- oiw.m.Unlock()
+ oiw.Unlock()
}
-func (oiw *objectsInWork) add(addr oid.Address) {
- oiw.m.Lock()
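+// add reports whether addr was newly added; the address is marked as in-work in either case.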
+func (oiw *objectsInWork) add(addr oid.Address) bool {
+ oiw.Lock()
+ _, exists := oiw.objs[addr]
oiw.objs[addr] = struct{}{}
- oiw.m.Unlock()
+ oiw.Unlock()
+ return !exists
}
// Policer represents the utility that verifies
@@ -58,53 +47,6 @@ type Policer struct {
objsInWork *objectsInWork
}
-// Option is an option for Policer constructor.
-type Option func(*cfg)
-
-// RedundantCopyCallback is a callback to pass
-// the redundant local copy of the object.
-type RedundantCopyCallback func(oid.Address)
-
-type cfg struct {
- headTimeout time.Duration
-
- log *logger.Logger
-
- jobQueue jobQueue
-
- cnrSrc container.Source
-
- placementBuilder placement.Builder
-
- remoteHeader *headsvc.RemoteHeader
-
- netmapKeys netmap.AnnouncedKeys
-
- replicator *replicator.Replicator
-
- cbRedundantCopy RedundantCopyCallback
-
- taskPool *ants.Pool
-
- loader nodeLoader
-
- maxCapacity int
-
- batchSize, cacheSize uint32
-
- rebalanceFreq, evictDuration time.Duration
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- batchSize: 10,
- cacheSize: 1024, // 1024 * address size = 1024 * 64 = 64 MiB
- rebalanceFreq: 1 * time.Second,
- evictDuration: 30 * time.Second,
- }
-}
-
// New creates, initializes and returns Policer instance.
func New(opts ...Option) *Policer {
c := defaultCfg()
@@ -113,106 +55,14 @@ func New(opts ...Option) *Policer {
opts[i](c)
}
- c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
-
cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
- if err != nil {
- panic(err)
- }
+ assert.NoError(err, fmt.Sprintf("could not create LRU cache of size %d", c.cacheSize))
return &Policer{
cfg: c,
cache: cache,
objsInWork: &objectsInWork{
- objs: make(map[oid.Address]struct{}, c.maxCapacity),
+ objs: make(map[oid.Address]struct{}, c.taskPool.Cap()),
},
}
}
-
-// WithHeadTimeout returns option to set Head timeout of Policer.
-func WithHeadTimeout(v time.Duration) Option {
- return func(c *cfg) {
- c.headTimeout = v
- }
-}
-
-// WithLogger returns option to set Logger of Policer.
-func WithLogger(v *logger.Logger) Option {
- return func(c *cfg) {
- c.log = v
- }
-}
-
-// WithLocalStorage returns option to set local object storage of Policer.
-func WithLocalStorage(v *engine.StorageEngine) Option {
- return func(c *cfg) {
- c.jobQueue.localStorage = v
- }
-}
-
-// WithContainerSource returns option to set container source of Policer.
-func WithContainerSource(v container.Source) Option {
- return func(c *cfg) {
- c.cnrSrc = v
- }
-}
-
-// WithPlacementBuilder returns option to set object placement builder of Policer.
-func WithPlacementBuilder(v placement.Builder) Option {
- return func(c *cfg) {
- c.placementBuilder = v
- }
-}
-
-// WithRemoteHeader returns option to set object header receiver of Policer.
-func WithRemoteHeader(v *headsvc.RemoteHeader) Option {
- return func(c *cfg) {
- c.remoteHeader = v
- }
-}
-
-// WithNetmapKeys returns option to set tool to work with announced public keys.
-func WithNetmapKeys(v netmap.AnnouncedKeys) Option {
- return func(c *cfg) {
- c.netmapKeys = v
- }
-}
-
-// WithReplicator returns option to set object replicator of Policer.
-func WithReplicator(v *replicator.Replicator) Option {
- return func(c *cfg) {
- c.replicator = v
- }
-}
-
-// WithRedundantCopyCallback returns option to set
-// callback to pass redundant local object copies
-// detected by Policer.
-func WithRedundantCopyCallback(cb RedundantCopyCallback) Option {
- return func(c *cfg) {
- c.cbRedundantCopy = cb
- }
-}
-
-// WithMaxCapacity returns option to set max capacity
-// that can be set to the pool.
-func WithMaxCapacity(capacity int) Option {
- return func(c *cfg) {
- c.maxCapacity = capacity
- }
-}
-
-// WithPool returns option to set pool for
-// policy and replication operations.
-func WithPool(p *ants.Pool) Option {
- return func(c *cfg) {
- c.taskPool = p
- }
-}
-
-// WithNodeLoader returns option to set FrostFS node load source.
-func WithNodeLoader(l nodeLoader) Option {
- return func(c *cfg) {
- c.loader = l
- }
-}
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
new file mode 100644
index 0000000000..049c337531
--- /dev/null
+++ b/pkg/services/policer/policer_test.go
@@ -0,0 +1,486 @@
+package policer
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "slices"
+ "sort"
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/panjf2000/ants/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBuryObjectWithoutContainer(t *testing.T) {
+ // Key space
+ addr := oidtest.Address()
+ objs := []objectcore.Info{
+ {
+ Address: addr,
+ Type: objectSDK.TypeRegular,
+ },
+ }
+
+ // Container source and bury function
+ buryCh := make(chan oid.Address)
+ containerSrc := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return &container.DelInfo{}, nil
+ },
+ }
+ buryFn := func(ctx context.Context, a oid.Address) error {
+ buryCh <- a
+ return nil
+ }
+
+ // Policer instance
+ p := New(
+ WithKeySpaceIterator(&sliceKeySpaceIterator{objs: objs}),
+ WithContainerSource(containerSrc),
+ WithBuryFunc(buryFn),
+ WithPool(testPool(t)),
+ )
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go p.Run(ctx)
+
+ require.Equal(t, addr, <-buryCh)
+}
+
+func TestProcessObject(t *testing.T) {
+ // Notes:
+ // - nodes are referred to by their index throughout, which is embedded in the public key
+ // - node with index 0 always refers to the local node, so there's no need to add it to objHolders
+ // - policy is used only to match the number of replicas for each index in the placement
+ tests := []struct {
+ desc string
+ objType objectSDK.Type
+ nodeCount int
+ policy string
+ placement [][]int
+ objHolders []int
+ maintenanceNodes []int
+ wantRemoveRedundant bool
+ wantReplicateTo []int
+ headResult map[int]error
+ ecInfo *objectcore.ECInfo
+ }{
+ {
+ desc: "1 copy already held by local node",
+ nodeCount: 1,
+ policy: `REP 1`,
+ placement: [][]int{{0}},
+ },
+ {
+ desc: "1 copy already held by the remote node",
+ nodeCount: 2,
+ policy: `REP 1`,
+ placement: [][]int{{1}},
+ objHolders: []int{1},
+ wantRemoveRedundant: true,
+ },
+ {
+ desc: "1 copy not yet held by the remote node",
+ nodeCount: 2,
+ policy: `REP 1`,
+ placement: [][]int{{1}},
+ wantReplicateTo: []int{1},
+ },
+ {
+ desc: "2 copies already held by local and remote node",
+ nodeCount: 2,
+ policy: `REP 2`,
+ placement: [][]int{{0, 1}},
+ objHolders: []int{1},
+ },
+ {
+ desc: "2 copies but not held by remote node",
+ nodeCount: 2,
+ policy: `REP 2`,
+ placement: [][]int{{0, 1}},
+ wantReplicateTo: []int{1},
+ },
+ {
+ desc: "multiple vectors already held by remote node",
+ nodeCount: 2,
+ policy: `REP 2 REP 2`,
+ placement: [][]int{{0, 1}, {0, 1}},
+ objHolders: []int{1},
+ },
+ {
+ desc: "multiple vectors not yet held by remote node",
+ nodeCount: 2,
+ policy: `REP 2 REP 2`,
+ placement: [][]int{{0, 1}, {0, 1}},
+ wantReplicateTo: []int{1},
+ },
+ {
+ desc: "lock object must be replicated to all nodes",
+ objType: objectSDK.TypeLock,
+ nodeCount: 3,
+ policy: `REP 1`,
+ placement: [][]int{{0, 1, 2}},
+ wantReplicateTo: []int{1, 2},
+ },
+ {
+ desc: "preserve local copy when maintenance nodes exist",
+ nodeCount: 3,
+ policy: `REP 2`,
+ placement: [][]int{{1, 2}},
+ objHolders: []int{1},
+ maintenanceNodes: []int{2},
+ },
+ {
+ desc: "preserve local copy when node response with MAINTENANCE",
+ nodeCount: 3,
+ policy: `REP 2`,
+ placement: [][]int{{1, 2}},
+ objHolders: []int{1},
+ headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
+ },
+ {
+ desc: "lock object must be replicated to all EC nodes",
+ objType: objectSDK.TypeLock,
+ nodeCount: 3,
+ policy: `EC 1.1`,
+ placement: [][]int{{0, 1, 2}},
+ wantReplicateTo: []int{1, 2},
+ },
+ {
+ desc: "tombstone object must be replicated to all EC nodes",
+ objType: objectSDK.TypeTombstone,
+ nodeCount: 3,
+ policy: `EC 1.1`,
+ placement: [][]int{{0, 1, 2}},
+ wantReplicateTo: []int{1, 2},
+ },
+ {
+ desc: "do not remove local copy when MAINTENANCE status is cached",
+ objType: objectSDK.TypeRegular,
+ nodeCount: 3,
+ policy: `REP 1 REP 1`,
+ placement: [][]int{{1, 2}, {1, 0}},
+ headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
+ },
+ }
+
+ for i := range tests {
+ ti := tests[i]
+ t.Run(ti.desc, func(t *testing.T) {
+ addr := oidtest.Address()
+
+ // Netmap, placement policy and placement builder
+ nodes := make([]netmap.NodeInfo, ti.nodeCount)
+ for i := range nodes {
+ nodes[i].SetPublicKey([]byte{byte(i)})
+ }
+ for _, i := range ti.maintenanceNodes {
+ nodes[i].SetStatus(netmap.Maintenance)
+ }
+
+ var policy netmap.PlacementPolicy
+ require.NoError(t, policy.DecodeString(ti.policy))
+
+ placementVectors := make([][]netmap.NodeInfo, len(ti.placement))
+ for i, pv := range ti.placement {
+ for _, nj := range pv {
+ placementVectors[i] = append(placementVectors[i], nodes[nj])
+ }
+ }
+ placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+ if cnr.Equals(addr.Container()) && obj != nil && obj.Equals(addr.Object()) {
+ return placementVectors, nil
+ }
+ if ti.ecInfo != nil && cnr.Equals(addr.Container()) && obj != nil && obj.Equals(ti.ecInfo.ParentID) {
+ return placementVectors, nil
+ }
+ t.Errorf("unexpected placement build: cid=%v oid=%v", cnr, obj)
+ return nil, errors.New("unexpected placement build")
+ }
+
+ // Object remote header
+ headFn := func(_ context.Context, ni netmap.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ index := int(ni.PublicKey()[0])
+ if a != addr || index < 1 || index >= ti.nodeCount {
+ t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
+ return nil, errors.New("unexpected object head")
+ }
+ if ti.headResult != nil {
+ if err, ok := ti.headResult[index]; ok {
+ return nil, err
+ }
+ }
+ if slices.Contains(ti.objHolders, index) {
+ return nil, nil
+ }
+ return nil, new(apistatus.ObjectNotFound)
+ }
+
+ // Container source
+ cnr := &container.Container{}
+ cnr.Value.Init()
+ cnr.Value.SetPlacementPolicy(policy)
+ containerSrc := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ if id.Equals(addr.Container()) {
+ return cnr, nil
+ }
+ t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container())
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return &container.DelInfo{}, nil
+ },
+ }
+ buryFn := func(ctx context.Context, a oid.Address) error {
+ t.Errorf("unexpected object buried: %v", a)
+ return nil
+ }
+
+ // Policer instance
+ var gotRemoveRedundant bool
+ var gotReplicateTo []int
+
+ p := New(
+ WithContainerSource(containerSrc),
+ WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
+ WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
+ return bytes.Equal(k, nodes[0].PublicKey())
+ })),
+ WithRemoteObjectHeaderFunc(headFn),
+ WithBuryFunc(buryFn),
+ WithRedundantCopyCallback(func(_ context.Context, a oid.Address) {
+ require.True(t, a.Equals(addr), "unexpected redundant copy callback: a=%v", a)
+ gotRemoveRedundant = true
+ }),
+ WithReplicator(&testReplicator{
+ handleReplicationTask: func(_ context.Context, task replicator.Task, res replicator.TaskResult) {
+ require.True(t, task.Addr.Equals(addr), "unexpected replicator task: %+v", task)
+ for _, node := range task.Nodes {
+ gotReplicateTo = append(gotReplicateTo, int(node.PublicKey()[0]))
+ }
+ },
+ }),
+ WithPool(testPool(t)),
+ )
+
+ addrWithType := objectcore.Info{
+ Address: addr,
+ Type: ti.objType,
+ ECInfo: ti.ecInfo,
+ }
+
+ err := p.processObject(context.Background(), addrWithType)
+ require.NoError(t, err)
+ sort.Ints(gotReplicateTo)
+
+ require.Equal(t, ti.wantRemoveRedundant, gotRemoveRedundant)
+ require.Equal(t, ti.wantReplicateTo, gotReplicateTo)
+ })
+ }
+}
+
+func TestProcessObjectError(t *testing.T) {
+ addr := oidtest.Address()
+ // Container source
+ cnr := &container.Container{}
+ cnr.Value.Init()
+ source := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ }
+ buryFn := func(ctx context.Context, a oid.Address) error {
+ t.Errorf("unexpected object buried: %v", a)
+ return nil
+ }
+ p := New(
+ WithContainerSource(source),
+ WithBuryFunc(buryFn),
+ WithPool(testPool(t)),
+ )
+
+ addrWithType := objectcore.Info{
+ Address: addr,
+ }
+
+ require.True(t, client.IsErrContainerNotFound(p.processObject(context.Background(), addrWithType)))
+}
+
+func TestIteratorContract(t *testing.T) {
+ addr := oidtest.Address()
+ objs := []objectcore.Info{{
+ Address: addr,
+ Type: objectSDK.TypeRegular,
+ }}
+
+ buryFn := func(ctx context.Context, a oid.Address) error {
+ return nil
+ }
+
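+ // The scenario checks the iterator contract: the policer calls Rewind after
+ // every end-of-listing result and keeps iterating after opaque errors.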
+ it := &predefinedIterator{
+ scenario: []nextResult{
+ {objs, nil},
+ {nil, errors.New("opaque")},
+ {nil, engine.ErrEndOfListing},
+ {nil, engine.ErrEndOfListing},
+ {nil, errors.New("opaque")},
+ {objs, engine.ErrEndOfListing},
+ },
+ finishCh: make(chan struct{}),
+ }
+
+ containerSrc := containerSrc{
+ get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return nil, new(apistatus.ContainerNotFound)
+ },
+ deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return &container.DelInfo{}, nil
+ },
+ }
+
+ p := New(
+ WithKeySpaceIterator(it),
+ WithContainerSource(containerSrc),
+ WithBuryFunc(buryFn),
+ WithPool(testPool(t)),
+ func(c *cfg) {
+ c.sleepDuration = time.Millisecond
+ },
+ )
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go p.Run(ctx)
+
+ <-it.finishCh
+ cancel()
+ require.Equal(t, []string{
+ "Next",
+ "Next",
+ "Next",
+ "Rewind",
+ "Next",
+ "Rewind",
+ "Next",
+ "Next",
+ "Rewind",
+ }, it.calls)
+}
+
+func testPool(t *testing.T) *ants.Pool {
+ pool, err := ants.NewPool(4)
+ require.NoError(t, err)
+ return pool
+}
+
+type nextResult struct {
+ objs []objectcore.Info
+ err error
+}
+
+type predefinedIterator struct {
+ scenario []nextResult
+ finishCh chan struct{}
+ pos int
+ calls []string
+}
+
+func (it *predefinedIterator) Next(ctx context.Context, size uint32) ([]objectcore.Info, error) {
+ if it.pos == len(it.scenario) {
+ close(it.finishCh)
+ <-ctx.Done()
+ return nil, nil
+ }
+
+ res := it.scenario[it.pos]
+ it.pos++
+ it.calls = append(it.calls, "Next")
+ return res.objs, res.err
+}
+
+func (it *predefinedIterator) Rewind() {
+ it.calls = append(it.calls, "Rewind")
+}
+
+// sliceKeySpaceIterator is a KeySpaceIterator backed by a slice.
+type sliceKeySpaceIterator struct {
+ objs []objectcore.Info
+ cur int
+}
+
+func (it *sliceKeySpaceIterator) Next(_ context.Context, size uint32) ([]objectcore.Info, error) {
+ if it.cur >= len(it.objs) {
+ return nil, engine.ErrEndOfListing
+ }
+ end := min(it.cur+int(size), len(it.objs))
+ ret := it.objs[it.cur:end]
+ it.cur = end
+ return ret, nil
+}
+
+func (it *sliceKeySpaceIterator) Rewind() {
+ it.cur = 0
+}
+
+type containerSrc struct {
+ get func(ctx context.Context, id cid.ID) (*container.Container, error)
+ deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error)
+}
+
+func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
+ return f.get(ctx, id)
+}
+
+func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ return f.deletionInfo(ctx, id)
+}
+
+// placementBuilderFunc is a placement.Builder backed by a function
+type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
+
+func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+ return f(c, o, p)
+}
+
+// announcedKeysFunc is a netmap.AnnouncedKeys backed by a function.
+type announcedKeysFunc func([]byte) bool
+
+func (f announcedKeysFunc) IsLocalKey(k []byte) bool { return f(k) }
+
+type testReplicator struct {
+ handleReplicationTask func(ctx context.Context, task replicator.Task, res replicator.TaskResult)
+ handleLocalPutTask func(ctx context.Context, task replicator.Task)
+ handlePullTask func(ctx context.Context, task replicator.Task)
+}
+
+func (r *testReplicator) HandleReplicationTask(ctx context.Context, task replicator.Task, res replicator.TaskResult) {
+ r.handleReplicationTask(ctx, task, res)
+}
+
+func (r *testReplicator) HandleLocalPutTask(ctx context.Context, task replicator.Task) {
+ r.handleLocalPutTask(ctx, task)
+}
+
+func (r *testReplicator) HandlePullTask(ctx context.Context, task replicator.Task) {
+ r.handlePullTask(ctx, task)
+}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index 687216407c..635a5683b2 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -3,48 +3,47 @@ package policer
import (
"context"
"errors"
+ "sync"
"time"
- objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (p *Policer) Run(ctx context.Context) {
- defer func() {
- p.log.Info("routine stopped")
- }()
-
- go p.poolCapacityWorker(ctx)
p.shardPolicyWorker(ctx)
+ p.log.Info(ctx, logs.PolicerRoutineStopped)
}
func (p *Policer) shardPolicyWorker(ctx context.Context) {
- var (
- addrs []objectcore.AddressWithType
- cursor *engine.Cursor
- err error
- )
-
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
for {
select {
case <-ctx.Done():
+ p.taskPool.Release()
return
default:
}
- addrs, cursor, err = p.jobQueue.Select(cursor, p.batchSize)
+ addrs, err := p.keySpaceIterator.Next(ctx, p.batchSize)
if err != nil {
if errors.Is(err, engine.ErrEndOfListing) {
- time.Sleep(time.Second) // finished whole cycle, sleep a bit
+ p.keySpaceIterator.Rewind()
+ time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
continue
}
- p.log.Warn("failure at object select for replication", zap.Error(err))
+ p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
}
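+ // Deduplicate errors per container within this batch so a single failing
+ // container does not flood the log.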
+ skipMap := newSkipMap()
for i := range addrs {
select {
case <-ctx.Done():
+ p.taskPool.Release()
return
default:
addr := addrs[i]
@@ -54,47 +53,61 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
continue
}
- err = p.taskPool.Submit(func() {
+ err := p.taskPool.Submit(func() {
v, ok := p.cache.Get(addr.Address)
if ok && time.Since(v) < p.evictDuration {
return
}
- p.objsInWork.add(addr.Address)
-
- p.processObject(ctx, addr)
-
- p.cache.Add(addr.Address, time.Now())
- p.objsInWork.remove(addr.Address)
+ if p.objsInWork.add(addr.Address) {
+ err := p.processObject(ctx, addr)
+ if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
+ p.log.Error(ctx, logs.PolicerUnableToProcessObj,
+ zap.Stringer("object", addr.Address),
+ zap.Error(err))
+ }
+ p.cache.Add(addr.Address, time.Now())
+ p.objsInWork.remove(addr.Address)
+ p.metrics.IncProcessedObjects()
+ }
})
if err != nil {
- p.log.Warn("pool submission", zap.Error(err))
+ p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
}
}
}
}
}
-func (p *Policer) poolCapacityWorker(ctx context.Context) {
- ticker := time.NewTicker(p.rebalanceFreq)
- for {
- select {
- case <-ctx.Done():
- ticker.Stop()
- return
- case <-ticker.C:
- frostfsSysLoad := p.loader.ObjectServiceLoad()
- newCapacity := int((1.0 - frostfsSysLoad) * float64(p.maxCapacity))
- if newCapacity == 0 {
- newCapacity++
- }
+type errMap struct {
+ sync.Mutex
+ skipMap map[cid.ID][]error
+}
- if p.taskPool.Cap() != newCapacity {
- p.taskPool.Tune(newCapacity)
- p.log.Debug("tune replication capacity",
- zap.Float64("system_load", frostfsSysLoad),
- zap.Int("new_capacity", newCapacity))
- }
- }
+func newSkipMap() *errMap {
+ return &errMap{
+ skipMap: make(map[cid.ID][]error),
}
}
+
+// addSeenError marks err as seen error for the container.
+// Returns true if the error has already been added.
+func (m *errMap) addSeenError(cnr cid.ID, err error) bool {
+ m.Lock()
+ defer m.Unlock()
+
+ for _, e := range m.skipMap[cnr] {
+ if errors.Is(err, e) {
+ return true
+ }
+ }
+
+ // Restrict the list length to avoid possible OOM if random errors are added in the future.
+ const maxErrListLength = 10
+
+ lst := m.skipMap[cnr]
+ if len(lst) < maxErrListLength {
+ m.skipMap[cnr] = append(lst, err)
+ }
+ return false
+}
diff --git a/pkg/services/policer/queue.go b/pkg/services/policer/queue.go
deleted file mode 100644
index b8af44049e..0000000000
--- a/pkg/services/policer/queue.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package policer
-
-import (
- "fmt"
-
- objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-)
-
-type jobQueue struct {
- localStorage *engine.StorageEngine
-}
-
-func (q *jobQueue) Select(cursor *engine.Cursor, count uint32) ([]objectcore.AddressWithType, *engine.Cursor, error) {
- var prm engine.ListWithCursorPrm
- prm.WithCursor(cursor)
- prm.WithCount(count)
-
- res, err := q.localStorage.ListWithCursor(prm)
- if err != nil {
- return nil, nil, fmt.Errorf("cannot list objects in engine: %w", err)
- }
-
- return res.AddressList(), res.Cursor(), nil
-}
diff --git a/pkg/services/replicator/metrics.go b/pkg/services/replicator/metrics.go
new file mode 100644
index 0000000000..3fc0629266
--- /dev/null
+++ b/pkg/services/replicator/metrics.go
@@ -0,0 +1,8 @@
+package replicator
+
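+// MetricsRegister defines the metrics collected by the Replicator.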
+type MetricsRegister interface {
+ IncInFlightRequest()
+ DecInFlightRequest()
+ IncProcessedObjects()
+ AddPayloadSize(size int64)
+}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 476a5bc0ac..8c6f0df060 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -3,9 +3,13 @@ package replicator
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -16,31 +20,40 @@ type TaskResult interface {
SubmitSuccessfulReplication(netmap.NodeInfo)
}
-// HandleTask executes replication task inside invoking goroutine.
+// HandleReplicationTask executes a replication task inside the invoking goroutine.
// Passes all the nodes that accepted the replication to the TaskResult.
-func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult) {
+func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res TaskResult) {
+ p.metrics.IncInFlightRequest()
+ defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug("finish work",
- zap.Uint32("amount of unfinished replicas", task.quantity),
+ p.log.Debug(ctx, logs.ReplicatorFinishWork,
+ zap.Uint32("amount of unfinished replicas", task.NumCopies),
)
}()
- if task.obj == nil {
+	ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleReplicationTask",
+ trace.WithAttributes(
+ attribute.Stringer("address", task.Addr),
+ attribute.Int64("number_of_copies", int64(task.NumCopies)),
+ ))
+ defer span.End()
+
+ if task.Obj == nil {
var err error
- task.obj, err = engine.Get(p.localStorage, task.addr)
+ task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
if err != nil {
- p.log.Error("could not get object from local storage",
- zap.Stringer("object", task.addr),
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
+ zap.Stringer("object", task.Addr),
zap.Error(err))
return
}
}
- prm := new(putsvc.RemotePutPrm).
- WithObject(task.obj)
+ prm := new(objectwriter.RemotePutPrm).
+ WithObject(task.Obj)
- for i := 0; task.quantity > 0 && i < len(task.nodes); i++ {
+ for i := 0; task.NumCopies > 0 && i < len(task.Nodes); i++ {
select {
case <-ctx.Done():
return
@@ -48,26 +61,29 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
}
log := p.log.With(
- zap.String("node", netmap.StringifyPublicKey(task.nodes[i])),
- zap.Stringer("object", task.addr),
+ zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])),
+ zap.Stringer("object", task.Addr),
)
callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)
- err := p.remoteSender.PutObject(callCtx, prm.WithNodeInfo(task.nodes[i]))
+ err := p.remoteSender.PutObject(callCtx, prm.WithNodeInfo(task.Nodes[i]))
cancel()
if err != nil {
- log.Error("could not replicate object",
- zap.String("error", err.Error()),
+ log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
+ zap.Error(err),
)
} else {
- log.Debug("object successfully replicated")
+ log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
- task.quantity--
+ task.NumCopies--
- res.SubmitSuccessfulReplication(task.nodes[i])
+ res.SubmitSuccessfulReplication(task.Nodes[i])
+
+ p.metrics.IncProcessedObjects()
+ p.metrics.AddPayloadSize(int64(task.Obj.PayloadSize()))
}
}
}
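Note: each remote PutObject call above runs under its own deadline so one slow node cannot stall the whole task. The same loop shape in isolation (node names, timings, and the stand-in sender are invented for the sketch):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sendWithTimeout stands in for remoteSender.PutObject: every attempt
// gets its own deadline derived from the parent context.
func sendWithTimeout(ctx context.Context, node string, putTimeout time.Duration) error {
	callCtx, cancel := context.WithTimeout(ctx, putTimeout)
	defer cancel() // always release the timer, even on the success path
	select {
	case <-time.After(10 * time.Millisecond): // simulated network call
		return nil
	case <-callCtx.Done():
		return callCtx.Err()
	}
}

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	remaining := uint32(2) // plays the role of task.NumCopies
	for i := 0; remaining > 0 && i < len(nodes); i++ {
		if err := sendWithTimeout(context.Background(), nodes[i], time.Second); err != nil {
			fmt.Println("replication to", nodes[i], "failed:", err)
			continue // try the next candidate holder
		}
		remaining--
	}
	fmt.Println("unfinished replicas:", remaining)
}
```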
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
new file mode 100644
index 0000000000..216fe4919c
--- /dev/null
+++ b/pkg/services/replicator/pull.go
@@ -0,0 +1,66 @@
+package replicator
+
+import (
+ "context"
+ "errors"
+ "slices"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+var errFailedToGetObjectFromAnyNode = errors.New("failed to get object from any node")
+
+func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
+ p.metrics.IncInFlightRequest()
+ defer p.metrics.DecInFlightRequest()
+ defer func() {
+ p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask",
+ trace.WithAttributes(
+ attribute.Stringer("address", task.Addr),
+ attribute.Int("nodes_count", len(task.Nodes)),
+ ))
+ defer span.End()
+
+ var obj *objectSDK.Object
+
+ for _, node := range task.Nodes {
+ var err error
+ obj, err = p.remoteGetter.Get(ctx, getsvc.RemoteGetPrm{
+ Address: task.Addr,
+ Node: node,
+ })
+ if err == nil {
+ break
+ }
+ endpoints := slices.Collect(node.NetworkEndpoints())
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ zap.Stringer("object", task.Addr),
+ zap.Error(err),
+ zap.Strings("endpoints", endpoints))
+ }
+
+ if obj == nil {
+ p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+ zap.Stringer("object", task.Addr),
+ zap.Error(errFailedToGetObjectFromAnyNode))
+ return
+ }
+
+ err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
+ if err != nil {
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ zap.Stringer("object", task.Addr),
+ zap.Error(err))
+ }
+}
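Note: NetworkEndpoints() returns a Go 1.23 iter.Seq[string], which slices.Collect materializes into the []string that zap.Strings expects. A standalone illustration of that pairing (the endpoint values are made up):

```go
package main

import (
	"fmt"
	"iter"
	"slices"
)

// networkEndpoints mimics the SDK accessor: it yields endpoints one by
// one as an iter.Seq[string] rather than allocating a slice up front.
func networkEndpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, ep := range []string{"/dns4/node1/tcp/8080", "/dns4/node2/tcp/8080"} {
			if !yield(ep) {
				return
			}
		}
	}
}

func main() {
	// slices.Collect drains the sequence into a []string, which is what
	// zap.Strings needs for structured logging.
	endpoints := slices.Collect(networkEndpoints())
	fmt.Println(endpoints)
}
```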
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
new file mode 100644
index 0000000000..bcad8471da
--- /dev/null
+++ b/pkg/services/replicator/put.go
@@ -0,0 +1,45 @@
+package replicator
+
+import (
+ "context"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+var errObjectNotDefined = errors.New("object is not defined")
+
+func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
+ p.metrics.IncInFlightRequest()
+ defer p.metrics.DecInFlightRequest()
+ defer func() {
+		p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "put"))
+ }()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask",
+ trace.WithAttributes(
+ attribute.Stringer("address", task.Addr),
+ attribute.Int("nodes_count", len(task.Nodes)),
+ ))
+ defer span.End()
+
+ if task.Obj == nil {
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ zap.Stringer("object", task.Addr),
+ zap.Error(errObjectNotDefined))
+ return
+ }
+
+ err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
+ if err != nil {
+ p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
+ zap.Stringer("object", task.Addr),
+ zap.Error(err))
+ }
+}
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index 4939821005..a940cef377 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -4,9 +4,9 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
// Replicator represents the utility that replicates
@@ -23,9 +23,13 @@ type cfg struct {
log *logger.Logger
- remoteSender *putsvc.RemoteSender
+ remoteSender *objectwriter.RemoteSender
+
+ remoteGetter *getsvc.RemoteGetter
localStorage *engine.StorageEngine
+
+ metrics MetricsRegister
}
func defaultCfg() *cfg {
@@ -40,8 +44,6 @@ func New(opts ...Option) *Replicator {
opts[i](c)
}
- c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))}
-
return &Replicator{
cfg: c,
}
@@ -62,15 +64,27 @@ func WithLogger(v *logger.Logger) Option {
}
// WithRemoteSender returns option to set remote object sender of Replicator.
-func WithRemoteSender(v *putsvc.RemoteSender) Option {
+func WithRemoteSender(v *objectwriter.RemoteSender) Option {
return func(c *cfg) {
c.remoteSender = v
}
}
+func WithRemoteGetter(v *getsvc.RemoteGetter) Option {
+ return func(c *cfg) {
+ c.remoteGetter = v
+ }
+}
+
// WithLocalStorage returns option to set local object storage of Replicator.
func WithLocalStorage(v *engine.StorageEngine) Option {
return func(c *cfg) {
c.localStorage = v
}
}
+
+func WithMetrics(v MetricsRegister) Option {
+ return func(c *cfg) {
+ c.metrics = v
+ }
+}
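Note: callers wire the two new dependencies through the added options. A compile-level sketch (the helper name is ours, and the dependencies are assumed to be constructed elsewhere):

```go
package wiring

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

// newReplicator shows the option wiring after this patch; the caller
// supplies already-constructed dependencies.
func newReplicator(
	log *logger.Logger,
	sender *objectwriter.RemoteSender,
	getter *getsvc.RemoteGetter,
	storage *engine.StorageEngine,
	metrics replicator.MetricsRegister,
) *replicator.Replicator {
	return replicator.New(
		replicator.WithLogger(log),
		replicator.WithRemoteSender(sender),
		replicator.WithRemoteGetter(getter), // new: enables pull tasks
		replicator.WithLocalStorage(storage),
		replicator.WithMetrics(metrics), // new: in-flight/processed counters
	)
}
```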
diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go
index ec1b557885..a03f8dcaa4 100644
--- a/pkg/services/replicator/task.go
+++ b/pkg/services/replicator/task.go
@@ -1,6 +1,7 @@
package replicator
import (
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -8,31 +9,14 @@ import (
// Task represents group of Replicator task parameters.
type Task struct {
- quantity uint32
+ // NumCopies is the number of copies to replicate.
+ NumCopies uint32
+ // Addr is the address of the local object.
+ Addr oid.Address
+	// Obj is the object itself; it may be set to avoid fetching the object from the local storage.
+ Obj *objectSDK.Object
+ // Nodes is a list of potential object holders.
+ Nodes []netmap.NodeInfo
- addr oid.Address
-
- obj *objectSDK.Object
-
- nodes []netmap.NodeInfo
-}
-
-// SetCopiesNumber sets number of copies to replicate.
-func (t *Task) SetCopiesNumber(v uint32) {
- t.quantity = v
-}
-
-// SetObjectAddress sets address of local object.
-func (t *Task) SetObjectAddress(v oid.Address) {
- t.addr = v
-}
-
-// SetObject sets object to avoid fetching it from the local storage.
-func (t *Task) SetObject(obj *objectSDK.Object) {
- t.obj = obj
-}
-
-// SetNodes sets a list of potential object holders.
-func (t *Task) SetNodes(v []netmap.NodeInfo) {
- t.nodes = v
+ Container containerSDK.Container
}
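Note: with the setters gone, callers populate Task as a plain struct literal. A sketch of the new call shape (the helper and its argument list are illustrative):

```go
package wiring

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// replicate shows Task construction with the exported fields that replace
// the removed SetCopiesNumber/SetObjectAddress/SetObject/SetNodes setters.
func replicate(
	ctx context.Context,
	r *replicator.Replicator,
	res replicator.TaskResult,
	addr oid.Address,
	obj *objectSDK.Object,
	nodes []netmap.NodeInfo,
	cnr containerSDK.Container,
) {
	task := replicator.Task{
		NumCopies: 1,
		Addr:      addr,
		Obj:       obj, // may be nil; the replicator then fetches it locally
		Nodes:     nodes,
		Container: cnr,
	}
	r.HandleReplicationTask(ctx, task, res)
}
```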
diff --git a/pkg/services/reputation/common/deps.go b/pkg/services/reputation/common/deps.go
deleted file mode 100644
index ebb227b5e5..0000000000
--- a/pkg/services/reputation/common/deps.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package common
-
-import (
- "context"
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// Context wraps stdlib context
-// with accompanying meta values.
-type Context interface {
- context.Context
-
- // Must return epoch number to select the values.
- Epoch() uint64
-}
-
-// Writer describes the interface for storing reputation.Trust values.
-//
-// This interface is provided by both local storage
-// of values and remote (wrappers over the RPC).
-type Writer interface {
- // Write performs a write operation of reputation.Trust value
- // and returns any error encountered.
- //
- // All values after the Close call must be flushed to the
- // physical target. Implementations can cache values before
- // Close operation.
- //
- // Write must not be called after Close.
- Write(reputation.Trust) error
-
- // Close exits with method-providing Writer.
- //
- // All cached values must be flushed before
- // the Close's return.
- //
- // Methods must not be called after Close.
- io.Closer
-}
-
-// WriterProvider is a group of methods provided
-// by entity which generates keepers of
-// reputation.Trust values.
-type WriterProvider interface {
- // InitWriter should return an initialized Writer.
- //
- // Initialization problems are reported via error.
- // If no error was returned, then the Writer must not be nil.
- //
- // Implementations can have different logic for different
- // contexts, so specific ones may document their own behavior.
- InitWriter(Context) (Writer, error)
-}
-
-// ManagerBuilder defines an interface for providing a list
-// of Managers for specific epoch. Implementation depends on trust value.
-type ManagerBuilder interface {
- // BuildManagers must compose list of managers. It depends on
- // particular epoch and PeerID of the current route point.
- BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error)
-}
-
-// ServerInfo describes a set of
-// characteristics of a point in a route.
-type ServerInfo interface {
- // PublicKey returns public key of the node
- // from the route in a binary representation.
- PublicKey() []byte
-
- // Iterates over network addresses of the node
- // in the route. Breaks iterating on true return
- // of the handler.
- IterateAddresses(func(string) bool)
-
- // Returns number of server's network addresses.
- NumberOfAddresses() int
-
- // ExternalAddresses returns external addresses of a node.
- ExternalAddresses() []string
-}
diff --git a/pkg/services/reputation/common/managers.go b/pkg/services/reputation/common/managers.go
deleted file mode 100644
index ef11b81228..0000000000
--- a/pkg/services/reputation/common/managers.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package common
-
-import (
- "fmt"
-
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apiNetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "git.frostfs.info/TrueCloudLab/hrw"
- "go.uber.org/zap"
-)
-
-// managerBuilder is implementation of reputation ManagerBuilder interface.
-// It sorts nodes in NetMap with HRW algorithms and
-// takes the next node after the current one as the only manager.
-type managerBuilder struct {
- log *logger.Logger
- nmSrc netmapcore.Source
- opts *mngOptions
-}
-
-// ManagersPrm groups the required parameters of the managerBuilder's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type ManagersPrm struct {
- NetMapSource netmapcore.Source
-}
-
-// NewManagerBuilder creates a new instance of the managerBuilder.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created managerBuilder does not require additional
-// initialization and is completely ready for work.
-func NewManagerBuilder(prm ManagersPrm, opts ...MngOption) ManagerBuilder {
- switch {
- case prm.NetMapSource == nil:
- panic(fmt.Sprintf("invalid NetMapSource (%T):%v", prm.NetMapSource, prm.NetMapSource))
- }
-
- o := defaultMngOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &managerBuilder{
- log: o.log,
- nmSrc: prm.NetMapSource,
- opts: o,
- }
-}
-
-// implements Server on apiNetmap.NodeInfo.
-type nodeServer apiNetmap.NodeInfo
-
-func (x nodeServer) PublicKey() []byte {
- return (apiNetmap.NodeInfo)(x).PublicKey()
-}
-
-func (x nodeServer) IterateAddresses(f func(string) bool) {
- (apiNetmap.NodeInfo)(x).IterateNetworkEndpoints(f)
-}
-
-func (x nodeServer) NumberOfAddresses() int {
- return (apiNetmap.NodeInfo)(x).NumberOfNetworkEndpoints()
-}
-
-func (x nodeServer) ExternalAddresses() []string {
- return (apiNetmap.NodeInfo)(x).ExternalAddresses()
-}
-
-// BuildManagers sorts nodes in NetMap with HRW algorithms and
-// takes the next node after the current one as the only manager.
-func (mb *managerBuilder) BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error) {
- mb.log.Debug("start building managers",
- zap.Uint64("epoch", epoch),
- zap.Stringer("peer", p),
- )
-
- nm, err := mb.nmSrc.GetNetMapByEpoch(epoch)
- if err != nil {
- return nil, err
- }
-
- nmNodes := nm.Nodes()
-
- // make a copy to keep order consistency of the origin netmap after sorting
- nodes := make([]apiNetmap.NodeInfo, len(nmNodes))
-
- copy(nodes, nmNodes)
-
- hrw.SortHasherSliceByValue(nodes, epoch)
-
- for i := range nodes {
- if apireputation.ComparePeerKey(p, nodes[i].PublicKey()) {
- managerIndex := i + 1
-
- if managerIndex == len(nodes) {
- managerIndex = 0
- }
-
- return []ServerInfo{nodeServer(nodes[managerIndex])}, nil
- }
- }
-
- return nil, nil
-}
-
-type mngOptions struct {
- log *logger.Logger
-}
-
-type MngOption func(*mngOptions)
-
-func defaultMngOpts() *mngOptions {
- return &mngOptions{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns MngOption to specify logging component.
-func WithLogger(l *logger.Logger) MngOption {
- return func(o *mngOptions) {
- if l != nil {
- o.log = l
- }
- }
-}
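Note: for reference, the removed managerBuilder picked, as the sole manager, the node immediately after the local peer in HRW order, wrapping to the start of the list. The selection step in isolation (plain strings replace netmap.NodeInfo, and the HRW sort is assumed to have happened already):

```go
package main

import "fmt"

// nextAfter returns the element that follows self in the (already
// HRW-sorted) list, wrapping around at the end — the same rule the
// deleted BuildManagers applied to netmap nodes.
func nextAfter(sorted []string, self string) (string, bool) {
	for i, n := range sorted {
		if n == self {
			return sorted[(i+1)%len(sorted)], true
		}
	}
	return "", false // self is not present in the netmap
}

func main() {
	nodes := []string{"peer-c", "peer-a", "peer-b"} // pretend HRW order
	if mgr, ok := nextAfter(nodes, "peer-b"); ok {
		fmt.Println("manager:", mgr) // wraps around to peer-c
	}
}
```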
diff --git a/pkg/services/reputation/common/router/calls.go b/pkg/services/reputation/common/router/calls.go
deleted file mode 100644
index 75cdf56ea3..0000000000
--- a/pkg/services/reputation/common/router/calls.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package router
-
-import (
- "encoding/hex"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "go.uber.org/zap"
-)
-
-// routeContext wraps context with additional passed
-// route data. It is only used inside Router and is
-// not passed in any external methods.
-type routeContext struct {
- common.Context
-
- passedRoute []common.ServerInfo
-}
-
-// NewRouteContext wraps the main context of value passing with its traversal route and epoch.
-func NewRouteContext(ctx common.Context, passed []common.ServerInfo) common.Context {
- return &routeContext{
- Context: ctx,
- passedRoute: passed,
- }
-}
-
-type trustWriter struct {
- router *Router
-
- routeCtx *routeContext
-
- routeMtx sync.RWMutex
- mServers map[string]common.Writer
-}
-
-// InitWriter initializes and returns Writer that sends each value to its next route point.
-//
-// If ctx was created by NewRouteContext, then the traversed route is taken into account,
-// and the value will be sent to its continuation. Otherwise, the route will be laid
-// from scratch and the value will be sent to its primary point.
-//
-// After building a list of remote points of the next leg of the route, the value is sent
-// sequentially to all of them. If any transmissions (even all) fail, an error will not
-// be returned.
-//
-// Close of the composed Writer calls Close method on each internal Writer generated in
-// runtime and never returns an error.
-//
-// Always returns nil error.
-func (r *Router) InitWriter(ctx common.Context) (common.Writer, error) {
- var (
- routeCtx *routeContext
- ok bool
- )
-
- if routeCtx, ok = ctx.(*routeContext); !ok {
- routeCtx = &routeContext{
- Context: ctx,
- passedRoute: []common.ServerInfo{r.localSrvInfo},
- }
- }
-
- return &trustWriter{
- router: r,
- routeCtx: routeCtx,
- mServers: make(map[string]common.Writer),
- }, nil
-}
-
-func (w *trustWriter) Write(t reputation.Trust) error {
- w.routeMtx.Lock()
- defer w.routeMtx.Unlock()
-
- route, err := w.router.routeBuilder.NextStage(w.routeCtx.Epoch(), t, w.routeCtx.passedRoute)
- if err != nil {
- return err
- } else if len(route) == 0 {
- route = []common.ServerInfo{nil}
- }
-
- for _, remoteInfo := range route {
- var key string
-
- if remoteInfo != nil {
- key = hex.EncodeToString(remoteInfo.PublicKey())
- }
-
- remoteWriter, ok := w.mServers[key]
- if !ok {
- provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
- if err != nil {
- w.router.log.Debug("could not initialize writer provider",
- zap.String("error", err.Error()),
- )
-
- continue
- }
-
- // init writer with original context wrapped in routeContext
- remoteWriter, err = provider.InitWriter(w.routeCtx.Context)
- if err != nil {
- w.router.log.Debug("could not initialize writer",
- zap.String("error", err.Error()),
- )
-
- continue
- }
-
- w.mServers[key] = remoteWriter
- }
-
- err := remoteWriter.Write(t)
- if err != nil {
- w.router.log.Debug("could not write the value",
- zap.String("error", err.Error()),
- )
- }
- }
-
- return nil
-}
-
-func (w *trustWriter) Close() error {
- for key, wRemote := range w.mServers {
- err := wRemote.Close()
- if err != nil {
- w.router.log.Debug("could not close remote server writer",
- zap.String("key", key),
- zap.String("error", err.Error()),
- )
- }
- }
-
- return nil
-}
diff --git a/pkg/services/reputation/common/router/deps.go b/pkg/services/reputation/common/router/deps.go
deleted file mode 100644
index 36aecb59f0..0000000000
--- a/pkg/services/reputation/common/router/deps.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package router
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-)
-
-// Builder groups methods to route values in the network.
-type Builder interface {
- // NextStage must return next group of route points
- // for passed epoch and trust values.
- // Implementation must take into account already passed route points.
- //
- // Empty passed list means being at the starting point of the route.
- //
- // Must return empty list and no error if the endpoint of the route is reached.
- NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error)
-}
-
-// RemoteWriterProvider describes the component
-// for sending values to a fixed route point.
-type RemoteWriterProvider interface {
- // InitRemote must return WriterProvider to the route point
- // corresponding to info.
- //
- // Nil info matches the end of the route.
- InitRemote(info common.ServerInfo) (common.WriterProvider, error)
-}
diff --git a/pkg/services/reputation/common/router/opts.go b/pkg/services/reputation/common/router/opts.go
deleted file mode 100644
index 1b34544122..0000000000
--- a/pkg/services/reputation/common/router/opts.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package router
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Router.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns Option to specify logging component.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/common/router/router.go b/pkg/services/reputation/common/router/router.go
deleted file mode 100644
index b80f6ce52b..0000000000
--- a/pkg/services/reputation/common/router/router.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package router
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// Prm groups the required parameters of the Router's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Characteristics of the local node's server.
- //
- // Must not be nil.
- LocalServerInfo common.ServerInfo
-
- // Component for sending values to a fixed route point.
- //
- // Must not be nil.
- RemoteWriterProvider RemoteWriterProvider
-
- // Route planner.
- //
- // Must not be nil.
- Builder Builder
-}
-
-// Router represents component responsible for routing
-// local trust values over the network.
-//
-// For each fixed pair (node peer, epoch) there is a
-// single value route on the network. Router provides the
-// interface for writing values to the next point of the route.
-//
-// For correct operation, Router must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Router is immediately ready to work through API.
-type Router struct {
- log *logger.Logger
-
- remoteProvider RemoteWriterProvider
-
- routeBuilder Builder
-
- localSrvInfo common.ServerInfo
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-func New(prm Prm, opts ...Option) *Router {
- switch {
- case prm.RemoteWriterProvider == nil:
- panicOnPrmValue("RemoteWriterProvider", prm.RemoteWriterProvider)
- case prm.Builder == nil:
- panicOnPrmValue("Builder", prm.Builder)
- case prm.LocalServerInfo == nil:
- panicOnPrmValue("LocalServerInfo", prm.LocalServerInfo)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &Router{
- log: o.log,
- remoteProvider: prm.RemoteWriterProvider,
- routeBuilder: prm.Builder,
- localSrvInfo: prm.LocalServerInfo,
- }
-}
diff --git a/pkg/services/reputation/common/router/util.go b/pkg/services/reputation/common/router/util.go
deleted file mode 100644
index aa3190d2ba..0000000000
--- a/pkg/services/reputation/common/router/util.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package router
-
-import (
- "bytes"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-)
-
-var errWrongRoute = errors.New("wrong route")
-
-// CheckRoute checks if the route is a route correctly constructed by the builder for value a.
-//
-// Returns nil if route is correct, otherwise an error clarifying the inconsistency.
-func CheckRoute(builder Builder, epoch uint64, t reputation.Trust, route []common.ServerInfo) error {
- for i := 1; i < len(route); i++ {
- servers, err := builder.NextStage(epoch, t, route[:i])
- if err != nil {
- return err
- } else if len(servers) == 0 {
- break
- }
-
- found := false
-
- for j := range servers {
- if bytes.Equal(servers[j].PublicKey(), route[i].PublicKey()) {
- found = true
- break
- }
- }
-
- if !found {
- return errWrongRoute
- }
- }
-
- return nil
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/calculator.go b/pkg/services/reputation/eigentrust/calculator/calculator.go
deleted file mode 100644
index bfa274fea5..0000000000
--- a/pkg/services/reputation/eigentrust/calculator/calculator.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package eigentrustcalc
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-)
-
-// Prm groups the required parameters of the Calculator's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Alpha parameter from origin EigenTrust algorithm
- // http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Ch.5.1.
- //
- // Must be in range (0, 1).
- AlphaProvider AlphaProvider
-
- // Source of initial node trust values
- //
- // Must not be nil.
- InitialTrustSource InitialTrustSource
-
- DaughterTrustSource DaughterTrustIteratorProvider
-
- IntermediateValueTarget common.WriterProvider
-
- FinalResultTarget IntermediateWriterProvider
-
- WorkerPool util.WorkerPool
-}
-
-// Calculator is a processor of a single iteration of EigenTrust algorithm.
-//
-// For correct operation, the Calculator must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Calculator is immediately ready to work through
-// API of external control of calculations and data transfer.
-type Calculator struct {
- alpha, beta reputation.TrustValue // beta = 1 - alpha
-
- prm Prm
-
- opts *options
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Calculator.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Calculator does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Calculator {
- switch {
- case prm.AlphaProvider == nil:
- panicOnPrmValue("AlphaProvider", prm.AlphaProvider)
- case prm.InitialTrustSource == nil:
- panicOnPrmValue("InitialTrustSource", prm.InitialTrustSource)
- case prm.DaughterTrustSource == nil:
- panicOnPrmValue("DaughterTrustSource", prm.DaughterTrustSource)
- case prm.IntermediateValueTarget == nil:
- panicOnPrmValue("IntermediateValueTarget", prm.IntermediateValueTarget)
- case prm.FinalResultTarget == nil:
- panicOnPrmValue("FinalResultTarget", prm.FinalResultTarget)
- case prm.WorkerPool == nil:
- panicOnPrmValue("WorkerPool", prm.WorkerPool)
- }
-
- o := defaultOpts()
-
- for _, opt := range opts {
- opt(o)
- }
-
- return &Calculator{
- prm: prm,
- opts: o,
- }
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go
deleted file mode 100644
index 09286d7985..0000000000
--- a/pkg/services/reputation/eigentrust/calculator/calls.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package eigentrustcalc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-type CalculatePrm struct {
- last bool
-
- ei eigentrust.EpochIteration
-}
-
-func (p *CalculatePrm) SetLast(last bool) {
- p.last = last
-}
-
-func (p *CalculatePrm) SetEpochIteration(ei eigentrust.EpochIteration) {
- p.ei = ei
-}
-
-func (c *Calculator) Calculate(prm CalculatePrm) {
- alpha, err := c.prm.AlphaProvider.EigenTrustAlpha()
- if err != nil {
- c.opts.log.Debug(
- "failed to get alpha param",
- zap.Error(err),
- )
- return
- }
-
- c.alpha = reputation.TrustValueFromFloat64(alpha)
- c.beta = reputation.TrustValueFromFloat64(1 - alpha)
-
- ctx := eigentrust.IterContext{
- Context: context.Background(),
- EpochIteration: prm.ei,
- }
-
- iter := ctx.I()
-
- log := c.opts.log.With(
- zap.Uint64("epoch", ctx.Epoch()),
- zap.Uint32("iteration", iter),
- )
-
- if iter == 0 {
- c.sendInitialValues(ctx)
- return
- }
-
- // decrement iteration number to select the values collected
- // on the previous stage
- ctx.SetI(iter - 1)
-
- consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(ctx)
- if err != nil {
- log.Debug("consumers trust iterator's init failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // continue with initial iteration number
- ctx.SetI(iter)
-
- err = consumersIter.Iterate(func(daughter apireputation.PeerID, iter TrustIterator) error {
- err := c.prm.WorkerPool.Submit(func() {
- c.iterateDaughter(iterDaughterPrm{
- lastIter: prm.last,
- ctx: ctx,
- id: daughter,
- consumersIter: iter,
- })
- })
- if err != nil {
- log.Debug("worker pool submit failure",
- zap.String("error", err.Error()),
- )
- }
-
- // don't stop trying
- return nil
- })
- if err != nil {
- log.Debug("iterate daughter's consumers failed",
- zap.String("error", err.Error()),
- )
- }
-}
-
-type iterDaughterPrm struct {
- lastIter bool
-
- ctx Context
-
- id apireputation.PeerID
-
- consumersIter TrustIterator
-}
-
-func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
- initTrust, err := c.prm.InitialTrustSource.InitialTrust(p.id)
- if err != nil {
- c.opts.log.Debug("get initial trust failure",
- zap.Stringer("daughter", p.id),
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ctx, p.id)
- if err != nil {
- c.opts.log.Debug("daughter trust iterator's init failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- sum := reputation.TrustZero
-
- err = p.consumersIter.Iterate(func(trust reputation.Trust) error {
- if !p.lastIter {
- select {
- case <-p.ctx.Done():
- return p.ctx.Err()
- default:
- }
- }
-
- sum.Add(trust.Value())
- return nil
- })
- if err != nil {
- c.opts.log.Debug("iterate over daughter's trusts failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // Alpha * Pd
- initTrust.Mul(c.alpha)
-
- sum.Mul(c.beta)
- sum.Add(initTrust)
-
- var intermediateTrust eigentrust.IterationTrust
-
- intermediateTrust.SetEpoch(p.ctx.Epoch())
- intermediateTrust.SetPeer(p.id)
- intermediateTrust.SetI(p.ctx.I())
-
- if p.lastIter {
- finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ctx)
- if err != nil {
- c.opts.log.Debug("init writer failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- intermediateTrust.SetValue(sum)
-
- err = finalWriter.WriteIntermediateTrust(intermediateTrust)
- if err != nil {
- c.opts.log.Debug("write final result failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
- } else {
- intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ctx)
- if err != nil {
- c.opts.log.Debug("init writer failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- err = daughterIter.Iterate(func(trust reputation.Trust) error {
- select {
- case <-p.ctx.Done():
- return p.ctx.Err()
- default:
- }
-
- val := trust.Value()
- val.Mul(sum)
-
- trust.SetValue(val)
-
- err := intermediateWriter.Write(trust)
- if err != nil {
- c.opts.log.Debug("write value failure",
- zap.String("error", err.Error()),
- )
- }
-
- return nil
- })
- if err != nil {
- c.opts.log.Debug("iterate daughter trusts failure",
- zap.String("error", err.Error()),
- )
- }
-
- err = intermediateWriter.Close()
- if err != nil {
- c.opts.log.Error(
- "could not close writer",
- zap.String("error", err.Error()),
- )
- }
- }
-}
-
-func (c *Calculator) sendInitialValues(ctx Context) {
- daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(ctx)
- if err != nil {
- c.opts.log.Debug("all daughters trust iterator's init failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(ctx)
- if err != nil {
- c.opts.log.Debug("init writer failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- err = daughterIter.Iterate(func(daughter apireputation.PeerID, iterator TrustIterator) error {
- return iterator.Iterate(func(trust reputation.Trust) error {
- trusted := trust.Peer()
-
- initTrust, err := c.prm.InitialTrustSource.InitialTrust(trusted)
- if err != nil {
- c.opts.log.Debug("get initial trust failure",
- zap.Stringer("peer", trusted),
- zap.String("error", err.Error()),
- )
-
- // don't stop on single failure
- return nil
- }
-
- initTrust.Mul(trust.Value())
- trust.SetValue(initTrust)
-
- err = intermediateWriter.Write(trust)
- if err != nil {
- c.opts.log.Debug("write value failure",
- zap.String("error", err.Error()),
- )
-
- // don't stop on single failure
- }
-
- return nil
- })
- })
- if err != nil {
- c.opts.log.Debug("iterate over all daughters failure",
- zap.String("error", err.Error()),
- )
- }
-
- err = intermediateWriter.Close()
- if err != nil {
- c.opts.log.Debug("could not close writer",
- zap.String("error", err.Error()),
- )
- }
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/deps.go b/pkg/services/reputation/eigentrust/calculator/deps.go
deleted file mode 100644
index 66d3fd3011..0000000000
--- a/pkg/services/reputation/eigentrust/calculator/deps.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package eigentrustcalc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-type Context interface {
- context.Context
-
- // Must return epoch number to select the values
- // for global trust calculation.
- Epoch() uint64
-
- // Must return the sequence number of the iteration.
- I() uint32
-}
-
-// InitialTrustSource must provide initial (non-calculated)
-// trusts to the current node's daughters. Realization may depend
-// on the daughter.
-type InitialTrustSource interface {
- InitialTrust(apireputation.PeerID) (reputation.TrustValue, error)
-}
-
-// TrustIterator must iterate over all retrieved(or calculated) trusts
-// and call passed TrustHandler on them.
-type TrustIterator interface {
- Iterate(reputation.TrustHandler) error
-}
-
-type PeerTrustsHandler func(apireputation.PeerID, TrustIterator) error
-
-// PeerTrustsIterator must iterate over all nodes(PeerIDs) and provide
-// TrustIterator for iteration over node's Trusts to others peers.
-type PeerTrustsIterator interface {
- Iterate(PeerTrustsHandler) error
-}
-
-type DaughterTrustIteratorProvider interface {
- // InitDaughterIterator must init TrustIterator
- // that iterates over received local trusts from
- // daughter p for ctx.Epoch() epoch.
- InitDaughterIterator(ctx Context, p apireputation.PeerID) (TrustIterator, error)
- // InitAllDaughtersIterator must init PeerTrustsIterator
- // that must iterate over all daughters of the current
- // node(manager) and all trusts received from them for
- // ctx.Epoch() epoch.
- InitAllDaughtersIterator(ctx Context) (PeerTrustsIterator, error)
- // InitConsumersIterator must init PeerTrustsIterator
- // that must iterate over all daughters of the current
- // node(manager) and their consumers' trusts received
- // from other managers for ctx.Epoch() epoch and
- // ctx.I() iteration.
- InitConsumersIterator(Context) (PeerTrustsIterator, error)
-}
-
-// IntermediateWriter must write an intermediate result to the contract.
-// Depending on the realization, trust is either sent directly to the contract
-// or redirected via another node.
-type IntermediateWriter interface {
- WriteIntermediateTrust(eigentrust.IterationTrust) error
-}
-
-// IntermediateWriterProvider must provide ready-to-work
-// IntermediateWriter.
-type IntermediateWriterProvider interface {
- InitIntermediateWriter(Context) (IntermediateWriter, error)
-}
-
-// AlphaProvider must provide information about required
-// alpha parameter for eigen trust algorithm.
-type AlphaProvider interface {
- EigenTrustAlpha() (float64, error)
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/opts.go b/pkg/services/reputation/eigentrust/calculator/opts.go
deleted file mode 100644
index e1e5723611..0000000000
--- a/pkg/services/reputation/eigentrust/calculator/opts.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package eigentrustcalc
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Controller.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to specify logging component.
-//
-// Ignores nil values.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/eigentrust/controller/calls.go b/pkg/services/reputation/eigentrust/controller/calls.go
deleted file mode 100644
index 87d23c7f29..0000000000
--- a/pkg/services/reputation/eigentrust/controller/calls.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package eigentrustctrl
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- "go.uber.org/zap"
-)
-
-// ContinuePrm groups the required parameters of Continue operation.
-type ContinuePrm struct {
- Epoch uint64
-}
-
-type iterContext struct {
- context.Context
-
- eigentrust.EpochIteration
-
- iterationNumber uint32
- last bool
-}
-
-func (x iterContext) Last() bool {
- return x.last
-}
-
-type iterContextCancel struct {
- iterContext
-
- cancel context.CancelFunc
-}
-
-// Continue moves the global reputation calculator to the next iteration.
-func (c *Controller) Continue(prm ContinuePrm) {
- c.mtx.Lock()
-
- {
- iterCtx, ok := c.mCtx[prm.Epoch]
- if !ok {
- iterCtx = new(iterContextCancel)
- c.mCtx[prm.Epoch] = iterCtx
-
- iterCtx.Context, iterCtx.cancel = context.WithCancel(context.Background())
- iterCtx.EpochIteration.SetEpoch(prm.Epoch)
-
- iterations, err := c.prm.IterationsProvider.EigenTrustIterations()
- if err != nil {
- c.opts.log.Error("could not get EigenTrust iteration number",
- zap.Error(err),
- )
- } else {
- iterCtx.iterationNumber = uint32(iterations)
- }
- } else {
- iterCtx.cancel()
- }
-
- iterCtx.last = iterCtx.I() == iterCtx.iterationNumber-1
-
- err := c.prm.WorkerPool.Submit(func() {
- c.prm.DaughtersTrustCalculator.Calculate(iterCtx.iterContext)
-
- // iteration++
- iterCtx.Increment()
- })
- if err != nil {
- c.opts.log.Debug("iteration submit failure",
- zap.String("error", err.Error()),
- )
- }
-
- if iterCtx.last {
-			// In this case and worker pool failure we can mark epoch
-			// number as already processed, but in any case it grows up
-			// during normal operation of the system. Also, such information
-			// will only live while the application is alive.
- delete(c.mCtx, prm.Epoch)
- }
- }
-
- c.mtx.Unlock()
-}
diff --git a/pkg/services/reputation/eigentrust/controller/controller.go b/pkg/services/reputation/eigentrust/controller/controller.go
deleted file mode 100644
index fe9150bcfc..0000000000
--- a/pkg/services/reputation/eigentrust/controller/controller.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package eigentrustctrl
-
-import (
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-)
-
-// Prm groups the required parameters of the Controller's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Component of computing iteration of EigenTrust algorithm.
- //
- // Must not be nil.
- DaughtersTrustCalculator DaughtersTrustCalculator
-
- // IterationsProvider provides information about numbers
- // of iterations for algorithm.
- IterationsProvider IterationsProvider
-
- // Routine execution pool for single epoch iteration.
- WorkerPool util.WorkerPool
-}
-
-// Controller represents EigenTrust algorithm transient controller.
-//
-// Controller's main goal is to separate the two main stages of
-// the calculation:
-// 1. reporting local values to manager nodes
-// 2. calculating global trusts of child nodes
-//
-// Calculation stages are controlled based on external signals
-// that come from the application through the Controller's API.
-//
-// For correct operation, the controller must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the constructor is immediately ready to work through
-// API of external control of calculations and data transfer.
-type Controller struct {
- prm Prm
-
- opts *options
-
- mtx sync.Mutex
- mCtx map[uint64]*iterContextCancel
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Controller.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Controller does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Controller {
- switch {
- case prm.IterationsProvider == nil:
- panicOnPrmValue("IterationNumber", prm.IterationsProvider)
- case prm.WorkerPool == nil:
- panicOnPrmValue("WorkerPool", prm.WorkerPool)
- case prm.DaughtersTrustCalculator == nil:
- panicOnPrmValue("DaughtersTrustCalculator", prm.DaughtersTrustCalculator)
- }
-
- o := defaultOpts()
-
- for _, opt := range opts {
- opt(o)
- }
-
- return &Controller{
- prm: prm,
- opts: o,
- mCtx: make(map[uint64]*iterContextCancel),
- }
-}
diff --git a/pkg/services/reputation/eigentrust/controller/deps.go b/pkg/services/reputation/eigentrust/controller/deps.go
deleted file mode 100644
index 8c4752657e..0000000000
--- a/pkg/services/reputation/eigentrust/controller/deps.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package eigentrustctrl
-
-import (
- "context"
-)
-
-// IterationContext is a context of the i-th
-// stage of iterative EigenTrust algorithm.
-type IterationContext interface {
- context.Context
-
- // Must return epoch number to select the values
- // for global trust calculation.
- Epoch() uint64
-
- // Must return the sequence number of the iteration.
- I() uint32
-
- // Must return true if I() is the last iteration.
- Last() bool
-}
-
-// DaughtersTrustCalculator is an interface of entity
-// responsible for calculating the global trust of
-// daughter nodes in terms of EigenTrust algorithm.
-type DaughtersTrustCalculator interface {
- // Must perform the iteration step of the loop
- // for computing the global trust of all daughter
- // nodes and sending intermediate values
- // according to EigenTrust description
- // http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Ch.5.1.
- //
- // Execution should be interrupted if ctx.Last().
- Calculate(ctx IterationContext)
-}
-
-// IterationsProvider must provide information about numbers
-// of iterations for algorithm.
-type IterationsProvider interface {
- EigenTrustIterations() (uint64, error)
-}
diff --git a/pkg/services/reputation/eigentrust/controller/opts.go b/pkg/services/reputation/eigentrust/controller/opts.go
deleted file mode 100644
index 16bc61c2f1..0000000000
--- a/pkg/services/reputation/eigentrust/controller/opts.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package eigentrustctrl
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Controller.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to specify logging component.
-//
-// Ignores nil values.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/eigentrust/iteration.go b/pkg/services/reputation/eigentrust/iteration.go
deleted file mode 100644
index 90a214ebcb..0000000000
--- a/pkg/services/reputation/eigentrust/iteration.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package eigentrust
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
-)
-
-type EpochIteration struct {
- e uint64
- i uint32
-}
-
-func (x EpochIteration) Epoch() uint64 {
- return x.e
-}
-
-func (x *EpochIteration) SetEpoch(e uint64) {
- x.e = e
-}
-
-func (x EpochIteration) I() uint32 {
- return x.i
-}
-
-func (x *EpochIteration) SetI(i uint32) {
- x.i = i
-}
-
-func (x *EpochIteration) Increment() {
- x.i++
-}
-
-type IterationTrust struct {
- EpochIteration
- reputation.Trust
-}
-
-// IterContext aggregates context and data required for
-// iterations.
-type IterContext struct {
- context.Context
- EpochIteration
-}
-
-func NewIterContext(ctx context.Context, epoch uint64, iter uint32) *IterContext {
- ei := EpochIteration{}
-
- ei.SetI(iter)
- ei.SetEpoch(epoch)
-
- return &IterContext{
- Context: ctx,
- EpochIteration: ei,
- }
-}
diff --git a/pkg/services/reputation/eigentrust/routes/builder.go b/pkg/services/reputation/eigentrust/routes/builder.go
deleted file mode 100644
index ddd5a2ae0d..0000000000
--- a/pkg/services/reputation/eigentrust/routes/builder.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// Prm groups the required parameters of the Builder's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Manager builder for current node.
- //
- // Must not be nil.
- ManagerBuilder common.ManagerBuilder
-
- Log *logger.Logger
-}
-
-// Builder represents component that routes node to its managers.
-//
-// For correct operation, Builder must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Builder is immediately ready to work through API.
-type Builder struct {
- managerBuilder common.ManagerBuilder
- log *logger.Logger
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Builder.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Builder does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Builder {
- switch {
- case prm.ManagerBuilder == nil:
- panicOnPrmValue("ManagerBuilder", prm.ManagerBuilder)
- case prm.Log == nil:
- panicOnPrmValue("Logger", prm.Log)
- }
-
- return &Builder{
- managerBuilder: prm.ManagerBuilder,
- log: prm.Log,
- }
-}
diff --git a/pkg/services/reputation/eigentrust/routes/calls.go b/pkg/services/reputation/eigentrust/routes/calls.go
deleted file mode 100644
index c4d9688a97..0000000000
--- a/pkg/services/reputation/eigentrust/routes/calls.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "go.uber.org/zap"
-)
-
-// NextStage builds Manager list for trusted node and returns it directly.
-//
-// If the passed route has more than one point, then the endpoint of the route has been reached.
-func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
- passedLen := len(passed)
-
- b.log.Debug("building next stage for trust route",
- zap.Uint64("epoch", epoch),
- zap.Int("passed_length", passedLen),
- )
-
- if passedLen > 1 {
- return nil, nil
- }
-
- route, err := b.managerBuilder.BuildManagers(epoch, t.Peer())
- if err != nil {
- return nil, fmt.Errorf("could not build managers for epoch: %d: %w", epoch, err)
- }
-
- return route, nil
-}
diff --git a/pkg/services/reputation/eigentrust/storage/consumers/calls.go b/pkg/services/reputation/eigentrust/storage/consumers/calls.go
deleted file mode 100644
index 55a4d6f3d7..0000000000
--- a/pkg/services/reputation/eigentrust/storage/consumers/calls.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package consumerstorage
-
-import (
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// Put saves intermediate trust of the consumer to daughter peer.
-func (x *Storage) Put(trust eigentrust.IterationTrust) {
- var s *iterationConsumersStorage
-
- x.mtx.Lock()
-
- {
- epoch := trust.Epoch()
-
- s = x.mItems[epoch]
- if s == nil {
- s = &iterationConsumersStorage{
- mItems: make(map[uint32]*ConsumersStorage, 1),
- }
-
- x.mItems[epoch] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-// Consumers returns the storage of trusts of the consumers of the daughter peers
-// for particular iteration of EigenTrust calculation for particular epoch.
-//
-// Returns false if there is no data for the epoch and iter.
-func (x *Storage) Consumers(epoch uint64, iter uint32) (*ConsumersStorage, bool) {
- var (
- s *iterationConsumersStorage
- ok bool
- )
-
- x.mtx.Lock()
-
- {
- s, ok = x.mItems[epoch]
- }
-
- x.mtx.Unlock()
-
- if !ok {
- return nil, false
- }
-
- return s.consumers(iter)
-}
-
-// maps iteration numbers of EigenTrust algorithm to repositories
-// of the trusts of the consumers of the daughter peers.
-type iterationConsumersStorage struct {
- mtx sync.RWMutex
-
- mItems map[uint32]*ConsumersStorage
-}
-
-func (x *iterationConsumersStorage) put(trust eigentrust.IterationTrust) {
- var s *ConsumersStorage
-
- x.mtx.Lock()
-
- {
- iter := trust.I()
-
- s = x.mItems[iter]
- if s == nil {
- s = &ConsumersStorage{
- mItems: make(map[string]*ConsumersTrusts, 1),
- }
-
- x.mItems[iter] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-func (x *iterationConsumersStorage) consumers(iter uint32) (s *ConsumersStorage, ok bool) {
- x.mtx.Lock()
-
- {
- s, ok = x.mItems[iter]
- }
-
- x.mtx.Unlock()
-
- return
-}
-
-// ConsumersStorage represents in-memory storage of intermediate trusts
-// of the peer consumers.
-//
-// Maps daughter peers to repositories of the trusts of their consumers.
-type ConsumersStorage struct {
- mtx sync.RWMutex
-
- mItems map[string]*ConsumersTrusts
-}
-
-func (x *ConsumersStorage) put(trust eigentrust.IterationTrust) {
- var s *ConsumersTrusts
-
- x.mtx.Lock()
-
- {
- daughter := trust.Peer().EncodeToString()
-
- s = x.mItems[daughter]
- if s == nil {
- s = &ConsumersTrusts{
- mItems: make(map[string]reputation.Trust, 1),
- }
-
- x.mItems[daughter] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-// Iterate passes IDs of the daughter peers with the trusts of their consumers to h.
-//
-// Returns errors from h directly.
-func (x *ConsumersStorage) Iterate(h eigentrustcalc.PeerTrustsHandler) (err error) {
- x.mtx.RLock()
-
- {
- for strTrusted, trusts := range x.mItems {
- var trusted apireputation.PeerID
-
- if strTrusted != "" {
- err = trusted.DecodeString(strTrusted)
- if err != nil {
- panic(fmt.Sprintf("decode peer ID string %s: %v", strTrusted, err))
- }
- }
-
- if err = h(trusted, trusts); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
-
-// ConsumersTrusts represents in-memory storage of the trusts
-// of the consumer peers to some other peer.
-type ConsumersTrusts struct {
- mtx sync.RWMutex
-
- mItems map[string]reputation.Trust
-}
-
-func (x *ConsumersTrusts) put(trust eigentrust.IterationTrust) {
- x.mtx.Lock()
-
- {
- x.mItems[trust.TrustingPeer().EncodeToString()] = trust.Trust
- }
-
- x.mtx.Unlock()
-}
-
-// Iterate passes all stored trusts to h.
-//
-// Returns errors from h directly.
-func (x *ConsumersTrusts) Iterate(h reputation.TrustHandler) (err error) {
- x.mtx.RLock()
-
- {
- for _, trust := range x.mItems {
- if err = h(trust); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
diff --git a/pkg/services/reputation/eigentrust/storage/consumers/storage.go b/pkg/services/reputation/eigentrust/storage/consumers/storage.go
deleted file mode 100644
index ee811d84b2..0000000000
--- a/pkg/services/reputation/eigentrust/storage/consumers/storage.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package consumerstorage
-
-import (
- "sync"
-)
-
-// Prm groups the required parameters of the Storage's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-//
-// The component is not parameterizable at the moment.
-type Prm struct{}
-
-// Storage represents in-memory storage of the trusts
-// of the consumer peers.
-//
-// It maps epoch numbers to the repositories of intermediate
-// trusts of the consumers of the daughter peers.
-//
-// For correct operation, Storage must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Storage is immediately ready to work through API.
-type Storage struct {
- mtx sync.RWMutex
-
- mItems map[uint64]*iterationConsumersStorage
-}
-
-// New creates a new instance of the Storage.
-//
-// The created Storage does not require additional
-// initialization and is completely ready for work.
-func New(_ Prm) *Storage {
- return &Storage{
- mItems: make(map[uint64]*iterationConsumersStorage),
- }
-}
diff --git a/pkg/services/reputation/eigentrust/storage/daughters/calls.go b/pkg/services/reputation/eigentrust/storage/daughters/calls.go
deleted file mode 100644
index eb229365e7..0000000000
--- a/pkg/services/reputation/eigentrust/storage/daughters/calls.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package daughters
-
-import (
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// Put saves daughter peer's trust to its provider for the epoch.
-func (x *Storage) Put(epoch uint64, trust reputation.Trust) {
- var s *DaughterStorage
-
- x.mtx.Lock()
-
- {
- s = x.mItems[epoch]
- if s == nil {
- s = &DaughterStorage{
- mItems: make(map[string]*DaughterTrusts, 1),
- }
-
- x.mItems[epoch] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-// DaughterTrusts returns daughter trusts for the epoch.
-//
-// Returns false if there is no data for the epoch and daughter.
-func (x *Storage) DaughterTrusts(epoch uint64, daughter apireputation.PeerID) (*DaughterTrusts, bool) {
- var (
- s *DaughterStorage
- ok bool
- )
-
- x.mtx.RLock()
-
- {
- s, ok = x.mItems[epoch]
- }
-
- x.mtx.RUnlock()
-
- if !ok {
- return nil, false
- }
-
- return s.daughterTrusts(daughter)
-}
-
-// AllDaughterTrusts returns the daughter trust iterator for the epoch.
-//
-// Returns false if there is no data for the epoch.
-func (x *Storage) AllDaughterTrusts(epoch uint64) (*DaughterStorage, bool) {
- x.mtx.RLock()
- defer x.mtx.RUnlock()
-
- s, ok := x.mItems[epoch]
-
- return s, ok
-}
-
-// DaughterStorage maps IDs of daughter peers to repositories of the local trusts to their providers.
-type DaughterStorage struct {
- mtx sync.RWMutex
-
- mItems map[string]*DaughterTrusts
-}
-
-// Iterate passes IDs of the daughter peers with their trusts to h.
-//
-// Returns errors from h directly.
-func (x *DaughterStorage) Iterate(h eigentrustcalc.PeerTrustsHandler) (err error) {
- x.mtx.RLock()
-
- {
- for strDaughter, daughterTrusts := range x.mItems {
- var daughter apireputation.PeerID
-
- if strDaughter != "" {
- err = daughter.DecodeString(strDaughter)
- if err != nil {
- panic(fmt.Sprintf("decode peer ID string %s: %v", strDaughter, err))
- }
- }
-
- if err = h(daughter, daughterTrusts); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
-
-func (x *DaughterStorage) put(trust reputation.Trust) {
- var dt *DaughterTrusts
-
- x.mtx.Lock()
-
- {
- trusting := trust.TrustingPeer().EncodeToString()
-
- dt = x.mItems[trusting]
- if dt == nil {
- dt = &DaughterTrusts{
- mItems: make(map[string]reputation.Trust, 1),
- }
-
- x.mItems[trusting] = dt
- }
- }
-
- x.mtx.Unlock()
-
- dt.put(trust)
-}
-
-func (x *DaughterStorage) daughterTrusts(id apireputation.PeerID) (dt *DaughterTrusts, ok bool) {
- x.mtx.RLock()
-
- {
- dt, ok = x.mItems[id.EncodeToString()]
- }
-
- x.mtx.RUnlock()
-
- return
-}
-
-// DaughterTrusts represents in-memory storage of local trusts
-// of the daughter peer to its providers.
-//
-// Maps IDs of daughter's providers to the local trusts to them.
-type DaughterTrusts struct {
- mtx sync.RWMutex
-
- mItems map[string]reputation.Trust
-}
-
-func (x *DaughterTrusts) put(trust reputation.Trust) {
- x.mtx.Lock()
-
- {
- x.mItems[trust.Peer().EncodeToString()] = trust
- }
-
- x.mtx.Unlock()
-}
-
-// Iterate passes all stored trusts to h.
-//
-// Returns errors from h directly.
-func (x *DaughterTrusts) Iterate(h reputation.TrustHandler) (err error) {
- x.mtx.RLock()
-
- {
- for _, trust := range x.mItems {
- if err = h(trust); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
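
Review note: daughters.Storage layered the same pattern twice — the epoch-level Put lazily creates the per-epoch DaughterStorage under the outer write lock, releases it, and only then writes into the inner storage, which carries its own mutex. A reduced sketch of that two-level lazy initialization (illustrative names):

package main

import "sync"

type inner struct {
	mtx    sync.RWMutex
	mItems map[string]float64
}

func (i *inner) put(peer string, val float64) {
	i.mtx.Lock()
	i.mItems[peer] = val
	i.mtx.Unlock()
}

type outer struct {
	mtx    sync.RWMutex
	mItems map[uint64]*inner
}

// put creates the per-epoch storage on first use; the inner write
// happens outside the outer lock, exactly as in the deleted code.
func (o *outer) put(epoch uint64, peer string, val float64) {
	o.mtx.Lock()
	s := o.mItems[epoch]
	if s == nil {
		s = &inner{mItems: make(map[string]float64, 1)}
		o.mItems[epoch] = s
	}
	o.mtx.Unlock()

	s.put(peer, val)
}

func main() {
	o := &outer{mItems: make(map[uint64]*inner)}
	o.put(13, "peerA", 0.25)
}
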
diff --git a/pkg/services/reputation/eigentrust/storage/daughters/storage.go b/pkg/services/reputation/eigentrust/storage/daughters/storage.go
deleted file mode 100644
index 26399fce44..0000000000
--- a/pkg/services/reputation/eigentrust/storage/daughters/storage.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package daughters
-
-import "sync"
-
-// Prm groups the required parameters of the Storage's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-//
-// The component is not parameterizable at the moment.
-type Prm struct{}
-
-// Storage represents in-memory storage of local trust
-// values of the daughter peers.
-//
-// It maps epoch numbers to the repositories of local trusts
-// of the daughter peers.
-//
-// For correct operation, Storage must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Storage is immediately ready to work through API.
-type Storage struct {
- mtx sync.RWMutex
-
- mItems map[uint64]*DaughterStorage
-}
-
-// New creates a new instance of the Storage.
-//
-// The created Storage does not require additional
-// initialization and is completely ready for work.
-func New(_ Prm) *Storage {
- return &Storage{
- mItems: make(map[uint64]*DaughterStorage),
- }
-}
diff --git a/pkg/services/reputation/local/controller/calls.go b/pkg/services/reputation/local/controller/calls.go
deleted file mode 100644
index d4e122b0ea..0000000000
--- a/pkg/services/reputation/local/controller/calls.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package trustcontroller
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// ReportPrm groups the required parameters of the Controller.Report method.
-type ReportPrm struct {
- epoch uint64
-}
-
-// SetEpoch sets epoch number to select reputation values.
-func (p *ReportPrm) SetEpoch(e uint64) {
- p.epoch = e
-}
-
-// Report reports local reputation values.
-//
-// A single Report operation transfers all data from LocalTrustSource
-// to LocalTrustTarget (Controller's parameters).
-//
-// Each call acquires a report context for an Epoch parameter.
-// At the very end of the operation, the context is released.
-func (c *Controller) Report(prm ReportPrm) {
- // acquire report
- reportCtx := c.acquireReport(prm.epoch)
- if reportCtx == nil {
- return
- }
-
- // report local trust values
- reportCtx.report()
-
- // finally stop and free the report
- c.freeReport(prm.epoch, reportCtx.log)
-}
-
-type reportContext struct {
- epoch uint64
-
- ctrl *Controller
-
- log *logger.Logger
-
- ctx common.Context
-}
-
-type iteratorContext struct {
- context.Context
-
- epoch uint64
-}
-
-func (c iteratorContext) Epoch() uint64 {
- return c.epoch
-}
-
-func (c *Controller) acquireReport(epoch uint64) *reportContext {
- var ctx context.Context
-
- c.mtx.Lock()
-
- {
- if cancel := c.mCtx[epoch]; cancel == nil {
- ctx, cancel = context.WithCancel(context.Background())
- c.mCtx[epoch] = cancel
- }
- }
-
- c.mtx.Unlock()
-
- log := &logger.Logger{Logger: c.opts.log.With(
- zap.Uint64("epoch", epoch),
- )}
-
- if ctx == nil {
- log.Debug("report is already started")
- return nil
- }
-
- return &reportContext{
- epoch: epoch,
- ctrl: c,
- log: log,
- ctx: &iteratorContext{
- Context: ctx,
- epoch: epoch,
- },
- }
-}
-
-func (c *reportContext) report() {
- c.log.Debug("starting to report local trust values")
-
- // initialize iterator over locally collected values
- iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ctx)
- if err != nil {
- c.log.Debug("could not initialize iterator over local trust values",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // initialize target of local trust values
- targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ctx)
- if err != nil {
- c.log.Debug("could not initialize local trust target",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // iterate over all values and write them to the target
- err = iterator.Iterate(
- func(t reputation.Trust) error {
- // check if context is done
- if err := c.ctx.Err(); err != nil {
- return err
- }
-
- return targetWriter.Write(t)
- },
- )
- if err != nil && !errors.Is(err, context.Canceled) {
- c.log.Debug("iterator over local trust failed",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // finish writing
- err = targetWriter.Close()
- if err != nil {
- c.log.Debug("could not finish writing local trust values",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- c.log.Debug("reporting successfully finished")
-}
-
-func (c *Controller) freeReport(epoch uint64, log *logger.Logger) {
- var stopped bool
-
- c.mtx.Lock()
-
- {
- var cancel context.CancelFunc
-
- cancel, stopped = c.mCtx[epoch]
-
- if stopped {
- cancel()
- delete(c.mCtx, epoch)
- }
- }
-
- c.mtx.Unlock()
-
- if stopped {
- log.Debug("reporting successfully interrupted")
- } else {
- log.Debug("reporting is not started or already interrupted")
- }
-}
-
-// StopPrm groups the required parameters of the Controller.Stop method.
-type StopPrm struct {
- epoch uint64
-}
-
-// SetEpoch sets the number of the epoch whose value processing must be interrupted.
-func (p *StopPrm) SetEpoch(e uint64) {
- p.epoch = e
-}
-
-// Stop interrupts the processing of local trust values.
-//
-// Releases acquired report context.
-func (c *Controller) Stop(prm StopPrm) {
- c.freeReport(
- prm.epoch,
- &logger.Logger{Logger: c.opts.log.With(zap.Uint64("epoch", prm.epoch))},
- )
-}
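
Review note: the removed Controller deduplicated concurrent reports per epoch via a map of cancel functions — acquire registers a context unless one already exists, free cancels and removes it, and Stop is free by another name. A self-contained sketch of that bookkeeping, simplified and with assumed names:

package main

import (
	"context"
	"fmt"
	"sync"
)

type controller struct {
	mtx  sync.Mutex
	mCtx map[uint64]context.CancelFunc
}

// acquire registers a cancellable context for the epoch, or returns
// nil if a report for this epoch is already in flight.
func (c *controller) acquire(epoch uint64) context.Context {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if _, ok := c.mCtx[epoch]; ok {
		return nil
	}
	ctx, cancel := context.WithCancel(context.Background())
	c.mCtx[epoch] = cancel
	return ctx
}

// free cancels and forgets the epoch's context, reporting whether
// anything was actually running.
func (c *controller) free(epoch uint64) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	cancel, ok := c.mCtx[epoch]
	if ok {
		cancel()
		delete(c.mCtx, epoch)
	}
	return ok
}

func main() {
	c := &controller{mCtx: make(map[uint64]context.CancelFunc)}
	fmt.Println(c.acquire(1) != nil) // true: report started
	fmt.Println(c.acquire(1) != nil) // false: already running
	fmt.Println(c.free(1))           // true: interrupted
}
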
diff --git a/pkg/services/reputation/local/controller/controller.go b/pkg/services/reputation/local/controller/controller.go
deleted file mode 100644
index 7bf56be89a..0000000000
--- a/pkg/services/reputation/local/controller/controller.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package trustcontroller
-
-import (
- "context"
- "fmt"
- "sync"
-
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-)
-
-// Prm groups the required parameters of the Controller's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Iterator over the reputation values
- // collected by the node locally.
- //
- // Must not be nil.
- LocalTrustSource IteratorProvider
-
- // Place of recording the local values of
- // trust to other nodes.
- //
- // Must not be nil.
- LocalTrustTarget reputationcommon.WriterProvider
-}
-
-// Controller represents the main handler for starting
-// and interrupting the reporting of local trust values.
-//
-// It binds the interfaces of the local value stores
-// to the target storage points. Controller is abstracted
-// from the internal storage device and the network location
-// of the connecting components. At its core, it is a
-// high-level start-stop trigger for reporting.
-//
-// For correct operation, the controller must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Controller is immediately ready to work through the
-// API of external control of calculations and data transfer.
-type Controller struct {
- prm Prm
-
- opts *options
-
- mtx sync.Mutex
- mCtx map[uint64]context.CancelFunc
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Controller.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Controller does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Controller {
- switch {
- case prm.LocalTrustSource == nil:
- panicOnPrmValue("LocalTrustSource", prm.LocalTrustSource)
- case prm.LocalTrustTarget == nil:
- panicOnPrmValue("LocalTrustTarget", prm.LocalTrustTarget)
- }
-
- o := defaultOpts()
-
- for _, opt := range opts {
- opt(o)
- }
-
- return &Controller{
- prm: prm,
- opts: o,
- mCtx: make(map[uint64]context.CancelFunc),
- }
-}
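
Review note: New validates its mandatory dependencies with a switch and panics through a shared helper, so a misconfigured service fails loudly at startup instead of as a later nil dereference. The idiom in isolation (illustrative prm type):

package main

import "fmt"

const invalidPrmValFmt = "invalid parameter %s (%T):%v"

func panicOnPrmValue(name string, v any) {
	panic(fmt.Sprintf(invalidPrmValFmt, name, v, v))
}

// prm stands in for the real parameter group; both fields are mandatory.
type prm struct {
	source any
	target any
}

func newController(p prm) string {
	switch {
	case p.source == nil:
		panicOnPrmValue("LocalTrustSource", p.source)
	case p.target == nil:
		panicOnPrmValue("LocalTrustTarget", p.target)
	}
	return "controller"
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("startup failed:", r)
		}
	}()
	newController(prm{}) // panics: invalid parameter LocalTrustSource (<nil>):<nil>
}
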
diff --git a/pkg/services/reputation/local/controller/deps.go b/pkg/services/reputation/local/controller/deps.go
deleted file mode 100644
index 3ab72eb5c2..0000000000
--- a/pkg/services/reputation/local/controller/deps.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package trustcontroller
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-)
-
-// Iterator is a group of methods provided by entity
-// which can iterate over a group of reputation.Trust values.
-type Iterator interface {
- // Iterate must start an iterator over all trust values.
- // For each value should call a handler, the error
- // of which should be directly returned from the method.
- //
- // Internal failures of the iterator are also signaled via
- // an error. After a successful call to the last value
- // handler, nil should be returned.
- Iterate(reputation.TrustHandler) error
-}
-
-// IteratorProvider is a group of methods provided
-// by entity which generates iterators over
-// reputation.Trust values.
-type IteratorProvider interface {
- // InitIterator should return an initialized Iterator
- // that iterates over values from IteratorContext.Epoch() epoch.
- //
- // Initialization problems are reported via error.
- // If no error was returned, then the Iterator must not be nil.
- //
- // Implementations can have different logic for different
- // contexts, so specific ones may document their own behavior.
- InitIterator(common.Context) (Iterator, error)
-}
diff --git a/pkg/services/reputation/local/controller/opts.go b/pkg/services/reputation/local/controller/opts.go
deleted file mode 100644
index 385a4243b0..0000000000
--- a/pkg/services/reputation/local/controller/opts.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package trustcontroller
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Controller.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to specify logging component.
-//
-// Ignores nil values.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
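
Review note: opts.go was the standard functional-options idiom — a defaults constructor plus Option closures, with a nil guard so callers may pass an unset logger. For reference, the pattern on its own, using the standard library logger as a stand-in:

package main

import "log"

type options struct {
	log *log.Logger
}

// Option mutates the option set; invalid values are silently
// ignored rather than reported, matching the deleted WithLogger.
type Option func(*options)

func defaultOpts() *options {
	return &options{log: log.Default()}
}

func WithLogger(l *log.Logger) Option {
	return func(o *options) {
		if l != nil {
			o.log = l
		}
	}
}

type Controller struct{ opts *options }

func New(opts ...Option) *Controller {
	o := defaultOpts()
	for _, opt := range opts {
		opt(o)
	}
	return &Controller{opts: o}
}

func main() {
	c := New(WithLogger(nil)) // nil is ignored; the default logger stays
	c.opts.log.Println("controller ready")
}
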
diff --git a/pkg/services/reputation/local/controller/util.go b/pkg/services/reputation/local/controller/util.go
deleted file mode 100644
index 97b9e3a658..0000000000
--- a/pkg/services/reputation/local/controller/util.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package trustcontroller
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-
-type storageWrapper struct {
- w common.Writer
- i Iterator
-}
-
-func (s storageWrapper) InitIterator(common.Context) (Iterator, error) {
- return s.i, nil
-}
-
-func (s storageWrapper) InitWriter(common.Context) (common.Writer, error) {
- return s.w, nil
-}
-
-// SimpleIteratorProvider returns IteratorProvider that provides
-// static context-independent Iterator.
-func SimpleIteratorProvider(i Iterator) IteratorProvider {
- return &storageWrapper{
- i: i,
- }
-}
-
-// SimpleWriterProvider returns WriterProvider that provides
-// static context-independent Writer.
-func SimpleWriterProvider(w common.Writer) common.WriterProvider {
- return &storageWrapper{
- w: w,
- }
-}
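
Review note: storageWrapper was a small adapter satisfying both provider interfaces by returning the same pre-built component regardless of context; that is how static stores plugged into the context-aware pipeline. Its shape, reduced to assumed minimal interfaces:

package main

import "fmt"

type iterator interface{ iterate() }

type writer interface{ write(string) }

type iteratorProvider interface {
	initIterator(ctx any) (iterator, error)
}

type writerProvider interface {
	initWriter(ctx any) (writer, error)
}

// storageWrapper ignores the context entirely and hands back
// whichever static component it was built with.
type storageWrapper struct {
	i iterator
	w writer
}

func (s storageWrapper) initIterator(any) (iterator, error) { return s.i, nil }
func (s storageWrapper) initWriter(any) (writer, error)     { return s.w, nil }

type printIter struct{}

func (printIter) iterate() { fmt.Println("iterating static values") }

func main() {
	var p iteratorProvider = storageWrapper{i: printIter{}}
	it, _ := p.initIterator(nil)
	it.iterate()
}
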
diff --git a/pkg/services/reputation/local/routes/builder.go b/pkg/services/reputation/local/routes/builder.go
deleted file mode 100644
index ddd5a2ae0d..0000000000
--- a/pkg/services/reputation/local/routes/builder.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// Prm groups the required parameters of the Builder's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Manager builder for current node.
- //
- // Must not be nil.
- ManagerBuilder common.ManagerBuilder
-
- Log *logger.Logger
-}
-
-// Builder represents component that routes node to its managers.
-//
-// For correct operation, Builder must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Builder is immediately ready to work through API.
-type Builder struct {
- managerBuilder common.ManagerBuilder
- log *logger.Logger
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Builder.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Builder does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Builder {
- switch {
- case prm.ManagerBuilder == nil:
- panicOnPrmValue("ManagerBuilder", prm.ManagerBuilder)
- case prm.Log == nil:
- panicOnPrmValue("Logger", prm.Log)
- }
-
- return &Builder{
- managerBuilder: prm.ManagerBuilder,
- log: prm.Log,
- }
-}
diff --git a/pkg/services/reputation/local/routes/calls.go b/pkg/services/reputation/local/routes/calls.go
deleted file mode 100644
index f0eae16fee..0000000000
--- a/pkg/services/reputation/local/routes/calls.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "go.uber.org/zap"
-)
-
-// NextStage builds the Manager list for the trusting node and returns it directly.
-//
-// If the passed route has more than one point, then the endpoint of the route has been reached.
-func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
- passedLen := len(passed)
-
- b.log.Debug("building next stage for local trust route",
- zap.Uint64("epoch", epoch),
- zap.Int("passed_length", passedLen),
- )
-
- if passedLen > 1 {
- return nil, nil
- }
-
- route, err := b.managerBuilder.BuildManagers(epoch, t.TrustingPeer())
- if err != nil {
- return nil, fmt.Errorf("could not build managers for epoch: %d: %w", epoch, err)
- }
-
- return route, nil
-}
diff --git a/pkg/services/reputation/local/storage/calls.go b/pkg/services/reputation/local/storage/calls.go
deleted file mode 100644
index 14acbb64f1..0000000000
--- a/pkg/services/reputation/local/storage/calls.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package truststorage
-
-import (
- "errors"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// UpdatePrm groups the parameters of Storage's Update operation.
-type UpdatePrm struct {
- sat bool
-
- epoch uint64
-
- peer apireputation.PeerID
-}
-
-// SetEpoch sets number of the epoch
-// when the interaction happened.
-func (p *UpdatePrm) SetEpoch(e uint64) {
- p.epoch = e
-}
-
-// SetPeer sets identifier of the peer
-// with which the local node interacted.
-func (p *UpdatePrm) SetPeer(id apireputation.PeerID) {
- p.peer = id
-}
-
-// SetSatisfactory sets successful completion status.
-func (p *UpdatePrm) SetSatisfactory(sat bool) {
- p.sat = sat
-}
-
-type trustValue struct {
- sat, all int
-}
-
-// EpochTrustValueStorage represents storage of
-// the trust values for a particular epoch.
-type EpochTrustValueStorage struct {
- mtx sync.RWMutex
-
- mItems map[string]*trustValue
-}
-
-func newTrustValueStorage() *EpochTrustValueStorage {
- return &EpochTrustValueStorage{
- mItems: make(map[string]*trustValue, 1),
- }
-}
-
-func stringifyPeerID(id apireputation.PeerID) string {
- return string(id.PublicKey())
-}
-
-func peerIDFromString(str string) (res apireputation.PeerID) {
- res.SetPublicKey([]byte(str))
- return
-}
-
-func (s *EpochTrustValueStorage) update(prm UpdatePrm) {
- s.mtx.Lock()
-
- {
- strID := stringifyPeerID(prm.peer)
-
- val, ok := s.mItems[strID]
- if !ok {
- val = new(trustValue)
- s.mItems[strID] = val
- }
-
- if prm.sat {
- val.sat++
- }
-
- val.all++
- }
-
- s.mtx.Unlock()
-}
-
-// Update updates the number of satisfactory transactions with peer.
-func (s *Storage) Update(prm UpdatePrm) {
- var trustStorage *EpochTrustValueStorage
-
- s.mtx.Lock()
-
- {
- var (
- ok bool
- epoch = prm.epoch
- )
-
- trustStorage, ok = s.mItems[epoch]
- if !ok {
- trustStorage = newTrustValueStorage()
- s.mItems[epoch] = trustStorage
- }
- }
-
- s.mtx.Unlock()
-
- trustStorage.update(prm)
-}
-
-// ErrNoPositiveTrust is returned by the iterator when
-// the number of successful transactions is not positive.
-var ErrNoPositiveTrust = errors.New("no positive trust")
-
-// DataForEpoch returns the EpochTrustValueStorage for the epoch.
-//
-// If there is no data for the epoch, ErrNoPositiveTrust is returned.
-func (s *Storage) DataForEpoch(epoch uint64) (*EpochTrustValueStorage, error) {
- s.mtx.RLock()
- trustStorage, ok := s.mItems[epoch]
- s.mtx.RUnlock()
-
- if !ok {
- return nil, ErrNoPositiveTrust
- }
-
- return trustStorage, nil
-}
-
-// Iterate iterates over normalized trust values and passes them to parameterized handler.
-//
-// Values are normalized according to http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Chapter 4.5.
-// If the divisor in the formula is zero, ErrNoPositiveTrust is returned.
-func (s *EpochTrustValueStorage) Iterate(h reputation.TrustHandler) (err error) {
- s.mtx.RLock()
-
- {
- var (
- sum reputation.TrustValue
- mVals = make(map[string]reputation.TrustValue, len(s.mItems))
- )
-
- // iterate first time to calculate normalizing divisor
- for strID, val := range s.mItems {
- if val.all > 0 {
- num := reputation.TrustValueFromInt(val.sat)
- denom := reputation.TrustValueFromInt(val.all)
-
- v := num.Div(denom)
-
- mVals[strID] = v
-
- sum.Add(v)
- }
- }
-
- err = ErrNoPositiveTrust
-
- if !sum.IsZero() {
- for strID, val := range mVals {
- t := reputation.Trust{}
-
- t.SetPeer(peerIDFromString(strID))
- t.SetValue(val.Div(sum))
-
- if err = h(t); err != nil {
- break
- }
- }
- }
- }
-
- s.mtx.RUnlock()
-
- return
-}
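
Review note: the Iterate above implements the normalization from the cited EigenTrust paper (Chapter 4.5): the raw local trust is the satisfaction ratio sat/all, and each value is divided by the sum of all raw values so the trust vector sums to one; a zero divisor means there is no positive trust at all. A standalone illustration:

package main

import (
	"errors"
	"fmt"
)

var errNoPositiveTrust = errors.New("no positive trust")

// normalize turns per-peer (sat, all) counters into a trust vector
// summing to 1, per EigenTrust Ch. 4.5: c_ij = s_ij / sum_j s_ij.
func normalize(counters map[string][2]int) (map[string]float64, error) {
	raw := make(map[string]float64, len(counters))
	var sum float64
	for peer, c := range counters {
		sat, all := c[0], c[1]
		if all > 0 {
			v := float64(sat) / float64(all)
			raw[peer] = v
			sum += v
		}
	}
	if sum == 0 {
		return nil, errNoPositiveTrust
	}
	for peer, v := range raw {
		raw[peer] = v / sum
	}
	return raw, nil
}

func main() {
	vec, err := normalize(map[string][2]int{
		"a": {1, 2}, // raw 0.5
		"b": {1, 1}, // raw 1.0
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(vec["a"], vec["b"]) // 0.333... and 0.666...
}
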
diff --git a/pkg/services/reputation/local/storage/storage.go b/pkg/services/reputation/local/storage/storage.go
deleted file mode 100644
index d7e54a3fc8..0000000000
--- a/pkg/services/reputation/local/storage/storage.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package truststorage
-
-import (
- "sync"
-)
-
-// Prm groups the required parameters of the Storage's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct{}
-
-// Storage represents in-memory storage of
-// local reputation values.
-//
-// Storage provides access to normalized local trust
-// values through iterator interface.
-//
-// For correct operation, Storage must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Storage is immediately ready to work through API.
-type Storage struct {
- prm Prm
-
- mtx sync.RWMutex
-
- mItems map[uint64]*EpochTrustValueStorage
-}
-
-// New creates a new instance of the Storage.
-//
-// The created Storage does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Storage {
- return &Storage{
- prm: prm,
- mItems: make(map[uint64]*EpochTrustValueStorage),
- }
-}
diff --git a/pkg/services/reputation/rpc/response.go b/pkg/services/reputation/rpc/response.go
deleted file mode 100644
index 808a0a4767..0000000000
--- a/pkg/services/reputation/rpc/response.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package reputationrpc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
-)
-
-type responseService struct {
- respSvc *response.Service
-
- svc Server
-}
-
-// NewResponseService returns reputation service server instance that passes
-// internal service call to response service.
-func NewResponseService(cnrSvc Server, respSvc *response.Service) Server {
- return &responseService{
- respSvc: respSvc,
- svc: cnrSvc,
- }
-}
-
-func (s *responseService) AnnounceLocalTrust(ctx context.Context, req *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceLocalTrust(ctx, req.(*reputation.AnnounceLocalTrustRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceLocalTrustResponse), nil
-}
-
-func (s *responseService) AnnounceIntermediateResult(ctx context.Context, req *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceIntermediateResult(ctx, req.(*reputation.AnnounceIntermediateResultRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceIntermediateResultResponse), nil
-}
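
Review note: all of the deleted response/sign wrappers share one move — a generic HandleUnaryRequest receives the request as any, and the caller's closure re-asserts the concrete type before delegating, so a single middleware serves every RPC. The indirection in miniature (assumed request/response types):

package main

import (
	"context"
	"fmt"
)

type announceRequest struct{ payload string }

type announceResponse struct{ payload string }

// handleUnaryRequest is the generic middleware hook: it only sees
// `any`, so concrete typing is restored inside the caller's closure.
func handleUnaryRequest(ctx context.Context, req any,
	handler func(context.Context, any) (any, error),
) (any, error) {
	// middleware work (metrics, response meta, ...) would happen here
	return handler(ctx, req)
}

func announce(ctx context.Context, req *announceRequest) (*announceResponse, error) {
	resp, err := handleUnaryRequest(ctx, req,
		func(ctx context.Context, req any) (any, error) {
			r := req.(*announceRequest) // restore the concrete type
			return &announceResponse{payload: "ack:" + r.payload}, nil
		},
	)
	if err != nil {
		return nil, err
	}
	return resp.(*announceResponse), nil
}

func main() {
	resp, _ := announce(context.Background(), &announceRequest{payload: "p"})
	fmt.Println(resp.payload)
}
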
diff --git a/pkg/services/reputation/rpc/server.go b/pkg/services/reputation/rpc/server.go
deleted file mode 100644
index 78af30ea7b..0000000000
--- a/pkg/services/reputation/rpc/server.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package reputationrpc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
-)
-
-// Server is an interface of the FrostFS API v2 Reputation service server.
-type Server interface {
- AnnounceLocalTrust(context.Context, *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error)
- AnnounceIntermediateResult(context.Context, *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error)
-}
diff --git a/pkg/services/reputation/rpc/sign.go b/pkg/services/reputation/rpc/sign.go
deleted file mode 100644
index 9db06ff1ef..0000000000
--- a/pkg/services/reputation/rpc/sign.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package reputationrpc
-
-import (
- "context"
- "crypto/ecdsa"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
-)
-
-type signService struct {
- sigSvc *util.SignService
-
- svc Server
-}
-
-func NewSignService(key *ecdsa.PrivateKey, svc Server) Server {
- return &signService{
- sigSvc: util.NewUnarySignService(key),
- svc: svc,
- }
-}
-
-func (s *signService) AnnounceLocalTrust(ctx context.Context, req *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceLocalTrust(ctx, req.(*reputation.AnnounceLocalTrustRequest))
- },
- func() util.ResponseMessage {
- return new(reputation.AnnounceLocalTrustResponse)
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceLocalTrustResponse), nil
-}
-
-func (s *signService) AnnounceIntermediateResult(ctx context.Context, req *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceIntermediateResult(ctx, req.(*reputation.AnnounceIntermediateResultRequest))
- },
- func() util.ResponseMessage {
- return new(reputation.AnnounceIntermediateResultResponse)
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceIntermediateResultResponse), nil
-}
diff --git a/pkg/services/reputation/trust.go b/pkg/services/reputation/trust.go
deleted file mode 100644
index 8c5d9091a3..0000000000
--- a/pkg/services/reputation/trust.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package reputation
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// TrustValue represents the numeric value of the node's trust.
-type TrustValue float64
-
-const (
- // TrustOne is a trust value equal to one.
- TrustOne = TrustValue(1)
-
- // TrustZero is a trust value equal to zero.
- TrustZero = TrustValue(0)
-)
-
-// TrustValueFromFloat64 converts float64 to TrustValue.
-func TrustValueFromFloat64(v float64) TrustValue {
- return TrustValue(v)
-}
-
-// TrustValueFromInt converts int to TrustValue.
-func TrustValueFromInt(v int) TrustValue {
- return TrustValue(v)
-}
-
-func (v TrustValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// Float64 converts TrustValue to float64.
-func (v TrustValue) Float64() float64 {
- return float64(v)
-}
-
-// Add adds v2 to v.
-func (v *TrustValue) Add(v2 TrustValue) {
- *v = *v + v2
-}
-
-// Div returns the result of dividing v by v2.
-func (v TrustValue) Div(v2 TrustValue) TrustValue {
- return v / v2
-}
-
-// Mul multiplies v by v2.
-func (v *TrustValue) Mul(v2 TrustValue) {
- *v *= v2
-}
-
-// IsZero returns true if v is equal to zero.
-func (v TrustValue) IsZero() bool {
- return v == 0
-}
-
-// Trust represents peer's trust (reputation).
-type Trust struct {
- trusting, peer reputation.PeerID
-
- val TrustValue
-}
-
-// TrustHandler describes the signature of the reputation.Trust
-// value handling function.
-//
-// Termination of processing without failures is usually signaled
-// with a nil error, while a specific value may describe the reason
-// for failure.
-type TrustHandler func(Trust) error
-
-// Value returns peer's trust value.
-func (t Trust) Value() TrustValue {
- return t.val
-}
-
-// SetValue sets peer's trust value.
-func (t *Trust) SetValue(val TrustValue) {
- t.val = val
-}
-
-// Peer returns trusted peer ID.
-func (t Trust) Peer() reputation.PeerID {
- return t.peer
-}
-
-// SetPeer sets trusted peer ID.
-func (t *Trust) SetPeer(id reputation.PeerID) {
- t.peer = id
-}
-
-// TrustingPeer returns trusting peer ID.
-func (t Trust) TrustingPeer() reputation.PeerID {
- return t.trusting
-}
-
-// SetTrustingPeer sets trusting peer ID.
-func (t *Trust) SetTrustingPeer(id reputation.PeerID) {
- t.trusting = id
-}
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index 237a139622..f0591de71c 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -4,8 +4,10 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"go.uber.org/zap"
)
@@ -16,22 +18,22 @@ type ServiceExecutor interface {
type executorSvc struct {
exec ServiceExecutor
+ respSvc *response.Service
+
log *logger.Logger
}
// NewExecutionService wraps ServiceExecutor and returns Session Service interface.
-func NewExecutionService(exec ServiceExecutor, l *logger.Logger) Server {
+func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *logger.Logger) Server {
return &executorSvc{
- exec: exec,
- log: l,
+ exec: exec,
+ log: l,
+ respSvc: respSvc,
}
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug("serving request...",
- zap.String("component", "SessionService"),
- zap.String("request", "Create"),
- )
+ s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create"))
respBody, err := s.exec.Create(ctx, req.GetBody())
if err != nil {
@@ -41,5 +43,6 @@ func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*
resp := new(session.CreateResponse)
resp.SetBody(respBody)
+ s.respSvc.SetMeta(resp)
return resp, nil
}
diff --git a/pkg/services/session/response.go b/pkg/services/session/response.go
deleted file mode 100644
index cbf93fb1f8..0000000000
--- a/pkg/services/session/response.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package session
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
-)
-
-type responseService struct {
- respSvc *response.Service
-
- svc Server
-}
-
-// NewResponseService returns session service instance that passes internal service
-// call to response service.
-func NewResponseService(ssSvc Server, respSvc *response.Service) Server {
- return &responseService{
- respSvc: respSvc,
- svc: ssSvc,
- }
-}
-
-func (s *responseService) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Create(ctx, req.(*session.CreateRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*session.CreateResponse), nil
-}
diff --git a/pkg/services/session/server.go b/pkg/services/session/server.go
index 9e44ae6676..e8555a7c94 100644
--- a/pkg/services/session/server.go
+++ b/pkg/services/session/server.go
@@ -3,7 +3,7 @@ package session
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
// Server is an interface of the FrostFS API Session service server.
diff --git a/pkg/services/session/sign.go b/pkg/services/session/sign.go
index 1156dc5380..3664c14035 100644
--- a/pkg/services/session/sign.go
+++ b/pkg/services/session/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type signService struct {
@@ -22,17 +22,10 @@ func NewSignService(key *ecdsa.PrivateKey, svc Server) Server {
}
func (s *signService) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.Create(ctx, req.(*session.CreateRequest))
- },
- func() util.ResponseMessage {
- return new(session.CreateResponse)
- },
- )
- if err != nil {
- return nil, err
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(session.CreateResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- return resp.(*session.CreateResponse), nil
+ resp, err := util.EnsureNonNilResponse(s.svc.Create(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
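
Review note: the new sign.go replaces that callback indirection with a linear flow — verify the request signature, short-circuit with a signed error response on failure, otherwise execute and sign whatever comes back (util.EnsureNonNilResponse guarantees there is always a response to sign). Schematically, under a minimal assumed verifier/signer:

package main

import (
	"context"
	"errors"
	"fmt"
)

type request struct{ body, sig string }

type response struct {
	body, sig string
	err       error
}

type sigService struct{}

func (sigService) verifyRequest(r *request) error {
	if r.sig == "" {
		return errors.New("invalid signature")
	}
	return nil
}

// signResponse attaches a signature and passes the handler error
// through, mirroring how SignResponse is used in the new code.
func (sigService) signResponse(resp *response, err error) error {
	resp.sig = "signed"
	resp.err = err
	return err
}

func create(ctx context.Context, svc sigService, req *request) (*response, error) {
	// A failed verification still yields a signed (error) response.
	if err := svc.verifyRequest(req); err != nil {
		resp := new(response)
		return resp, svc.signResponse(resp, err)
	}
	resp := &response{body: "ok: " + req.body} // the actual service call
	return resp, svc.signResponse(resp, nil)
}

func main() {
	resp, err := create(context.Background(), sigService{}, &request{body: "b", sig: "s"})
	fmt.Println(resp.body, resp.sig, err)
}
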
diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go
index f59e312c48..ea0233f9a4 100644
--- a/pkg/services/session/storage/persistent/executor.go
+++ b/pkg/services/session/storage/persistent/executor.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.etcd.io/bbolt"
@@ -17,7 +17,7 @@ import (
// encrypts private keys if storage has been configured so).
// Returns response that is filled with just created token's
// ID and public key for it.
-func (s *TokenStore) Create(ctx context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) {
+func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing owner")
diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go
index 860b958974..f80ecb591a 100644
--- a/pkg/services/session/storage/persistent/executor_test.go
+++ b/pkg/services/session/storage/persistent/executor_test.go
@@ -8,8 +8,8 @@ import (
"path/filepath"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
@@ -22,7 +22,7 @@ func TestTokenStore(t *testing.T) {
defer ts.Close()
- owner := *usertest.ID()
+ owner := usertest.ID()
var ownerV2 refs.OwnerID
owner.WriteToV2(&ownerV2)
@@ -39,7 +39,7 @@ func TestTokenStore(t *testing.T) {
tokens := make([]tok, 0, tokenNumber)
- for i := 0; i < tokenNumber; i++ {
+ for i := range tokenNumber {
req.SetExpiration(uint64(i))
res, err := ts.Create(context.Background(), req)
@@ -66,7 +66,7 @@ func TestTokenStore_Persistent(t *testing.T) {
ts, err := NewTokenStore(path)
require.NoError(t, err)
- idOwner := *usertest.ID()
+ idOwner := usertest.ID()
var idOwnerV2 refs.OwnerID
idOwner.WriteToV2(&idOwnerV2)
@@ -127,7 +127,7 @@ func TestTokenStore_RemoveOld(t *testing.T) {
defer ts.Close()
- owner := *usertest.ID()
+ owner := usertest.ID()
var ownerV2 refs.OwnerID
owner.WriteToV2(&ownerV2)
@@ -166,14 +166,14 @@ func TestTokenStore_RemoveOld(t *testing.T) {
//
// If this test is passing, TokenStore works correctly.
func TestBolt_Cursor(t *testing.T) {
- db, err := bbolt.Open(filepath.Join(t.TempDir(), ".storage"), 0666, nil)
+ db, err := bbolt.Open(filepath.Join(t.TempDir(), ".storage"), 0o666, nil)
require.NoError(t, err)
defer db.Close()
cursorKeys := make(map[string]struct{})
- var bucketName = []byte("bucket")
+ bucketName := []byte("bucket")
err = db.Update(func(tx *bbolt.Tx) (err error) {
b, err := tx.CreateBucket(bucketName)
diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go
index 411734ea18..60db97f901 100644
--- a/pkg/services/session/storage/persistent/options.go
+++ b/pkg/services/session/storage/persistent/options.go
@@ -19,7 +19,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- l: &logger.Logger{Logger: zap.L()},
+ l: logger.NewLoggerWrapper(zap.L()),
timeout: 100 * time.Millisecond,
}
}
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index ded33d1ec6..132d624459 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -1,11 +1,13 @@
package persistent
import (
+ "context"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -39,7 +41,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
o(cfg)
}
- db, err := bbolt.Open(path, 0600,
+ db, err := bbolt.Open(path, 0o600,
&bbolt.Options{
Timeout: cfg.timeout,
})
@@ -62,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
// enable encryption if it
// was configured so
if cfg.privateKey != nil {
- rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
+ rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
cfg.privateKey.D.FillBytes(rawKey)
c, err := aes.NewCipher(rawKey)
@@ -101,14 +103,10 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
var err error
t, err = s.unpackToken(rawToken)
- if err != nil {
- return err
- }
-
- return nil
+ return err
})
if err != nil {
- s.l.Error("could not get session from persistent storage",
+ s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage,
zap.Error(err),
zap.Stringer("ownerID", ownerID),
zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -133,7 +131,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
if epochFromToken(v) <= epoch {
err = c.Delete()
if err != nil {
- s.l.Error("could not delete %s token",
+ s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken,
zap.String("token_id", hex.EncodeToString(k)),
)
}
@@ -144,7 +142,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
})
})
if err != nil {
- s.l.Error("could not clean up expired tokens",
+ s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens,
zap.Uint64("epoch", epoch),
)
}
diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go
index aa64d796b5..423e579d7e 100644
--- a/pkg/services/session/storage/temporary/executor.go
+++ b/pkg/services/session/storage/temporary/executor.go
@@ -5,14 +5,14 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-func (s *TokenStore) Create(ctx context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) {
+func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing owner")
@@ -38,7 +38,7 @@ func (s *TokenStore) Create(ctx context.Context, body *session.CreateRequestBody
s.mtx.Lock()
s.tokens[key{
tokenID: base58.Encode(uidBytes),
- ownerID: base58.Encode(id.WalletBytes()),
+ ownerID: id.EncodeToString(),
}] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration())
s.mtx.Unlock()
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index 370499e062..c9da6b842f 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -9,7 +9,9 @@ import (
)
type key struct {
+ // nolint:unused
tokenID string
+ // nolint:unused
ownerID string
}
@@ -18,7 +20,7 @@ type key struct {
// expiring (removing) session tokens.
// Must be created only via calling NewTokenStore.
type TokenStore struct {
- mtx *sync.RWMutex
+ mtx sync.RWMutex
tokens map[key]*storage.PrivateToken
}
@@ -28,7 +30,6 @@ type TokenStore struct {
// The elements of the instance are stored in the map.
func NewTokenStore() *TokenStore {
return &TokenStore{
- mtx: new(sync.RWMutex),
tokens: make(map[key]*storage.PrivateToken),
}
}
@@ -40,7 +41,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken
s.mtx.RLock()
t := s.tokens[key{
tokenID: base58.Encode(tokenID),
- ownerID: base58.Encode(ownerID.WalletBytes()),
+ ownerID: ownerID.EncodeToString(),
}]
s.mtx.RUnlock()
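
Review note: switching mtx from *sync.RWMutex to sync.RWMutex makes the zero value usable and drops an allocation; the only obligation is that the struct now must be shared by pointer, since copying it would copy the mutex. Sketch:

package main

import "sync"

// store embeds the mutex by value: the zero value is ready to use,
// so the constructor no longer needs new(sync.RWMutex).
type store struct {
	mtx    sync.RWMutex
	tokens map[string]string
}

func newStore() *store {
	// The struct must be shared by pointer from here on; copying a
	// store would copy (and desynchronize) its mutex.
	return &store{tokens: make(map[string]string)}
}

func (s *store) get(k string) string {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.tokens[k]
}

func main() {
	s := newStore()
	_ = s.get("tokenID")
}
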
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
new file mode 100644
index 0000000000..58757ff6d8
--- /dev/null
+++ b/pkg/services/tree/ape.go
@@ -0,0 +1,104 @@
+package tree
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "net"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter"
+ aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "google.golang.org/grpc/peer"
+)
+
+func (s *Service) newAPERequest(ctx context.Context, namespace string,
+ cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+) (aperequest.Request, error) {
+ schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
+ if err != nil {
+ return aperequest.Request{}, err
+ }
+ schemaRole, err := converter.SchemaRoleFromACLRole(role)
+ if err != nil {
+ return aperequest.Request{}, err
+ }
+ reqProps := map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()),
+ nativeschema.PropertyKeyActorRole: schemaRole,
+ }
+ reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey)
+ if err != nil {
+ return aperequest.Request{}, err
+ }
+ if p, ok := peer.FromContext(ctx); ok {
+ if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
+ reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
+ }
+ }
+
+ var resourceName string
+ if namespace == "root" || namespace == "" {
+ resourceName = fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cid.EncodeToString())
+ } else {
+ resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
+ }
+
+ resProps := map[string]string{
+ nativeschema.ProperyKeyTreeID: treeID,
+ }
+
+ return aperequest.NewRequest(
+ schemaMethod,
+ aperequest.NewResource(resourceName, resProps),
+ reqProps,
+ ), nil
+}
+
+func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
+ container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+) error {
+ namespace := ""
+ cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
+ if hasNamespace {
+ namespace = cntNamespace
+ }
+
+ request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey)
+ if err != nil {
+ return fmt.Errorf("failed to create ape request: %w", err)
+ }
+
+ return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{
+ Request: request,
+ Namespace: namespace,
+ Container: cid,
+ ContainerOwner: container.Value.Owner(),
+ PublicKey: publicKey,
+ BearerToken: bt,
+ })
+}
+
+// fillWithUserClaimTags fills APE request properties with user claim tags, fetching them from the frostfsid contract by the actor's public key.
+func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
+ if reqProps == nil {
+ reqProps = make(map[string]string)
+ }
+ props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey)
+ if err != nil {
+ return reqProps, err
+ }
+ for propertyName, propertyValue := range props {
+ reqProps[propertyName] = propertyValue
+ }
+ return reqProps, nil
+}
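
Review note: checkAPE derives the namespace from the container's domain zone — a zone of the form <ns>.ns selects that namespace, anything else maps to the empty namespace, which newAPERequest then treats like "root" when formatting the resource name. The zone and naming logic in isolation; the format strings below are illustrative stand-ins for the nativeschema constants:

package main

import (
	"fmt"
	"strings"
)

// namespaceFromZone mirrors the CutSuffix logic in checkAPE:
// "dev.ns" -> "dev", any other zone -> "" (root).
func namespaceFromZone(zone string) string {
	if ns, ok := strings.CutSuffix(zone, ".ns"); ok {
		return ns
	}
	return ""
}

// resourceName mirrors the branch in newAPERequest; the formats are
// placeholders, not the real nativeschema constants.
func resourceName(namespace, cnrID string) string {
	if namespace == "root" || namespace == "" {
		return fmt.Sprintf("native:object/%s/*", cnrID)
	}
	return fmt.Sprintf("native:object/%s/%s/*", namespace, cnrID)
}

func main() {
	fmt.Println(namespaceFromZone("dev.ns")) // dev
	fmt.Println(resourceName("", "cnr1"))    // root form
	fmt.Println(resourceName("dev", "cnr1")) // namespaced form
}
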
diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go
new file mode 100644
index 0000000000..7b209fd47d
--- /dev/null
+++ b/pkg/services/tree/ape_test.go
@@ -0,0 +1,246 @@
+package tree
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy"
+
+ senderPrivateKey, _ = keys.NewPrivateKey()
+
+ senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
+
+ rootCnr = &core.Container{Value: containerSDK.Container{}}
+)
+
+type frostfsIDProviderMock struct {
+ subjects map[util.Uint160]*client.Subject
+ subjectsExtended map[util.Uint160]*client.SubjectExtended
+}
+
+func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
+ v, ok := f.subjects[key]
+ if !ok {
+ return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return v, nil
+}
+
+func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
+ v, ok := f.subjectsExtended[key]
+ if !ok {
+ return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
+ }
+ return v, nil
+}
+
+var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil)
+
+func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock {
+ return &frostfsIDProviderMock{
+ subjects: map[util.Uint160]*client.Subject{
+ scriptHashFromSenderKey(t, senderKey): {
+ Namespace: "testnamespace",
+ Name: "test",
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ },
+ },
+ subjectsExtended: map[util.Uint160]*client.SubjectExtended{
+ scriptHashFromSenderKey(t, senderKey): {
+ Namespace: "testnamespace",
+ Name: "test",
+ KV: map[string]string{
+ "tag-attr1": "value1",
+ "tag-attr2": "value2",
+ },
+ Groups: []*client.Group{
+ {
+ ID: 1,
+ Name: "test",
+ Namespace: "testnamespace",
+ KV: map[string]string{
+ "attr1": "value1",
+ "attr2": "value2",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
+ pk, err := keys.NewPublicKeyFromString(senderKey)
+ require.NoError(t, err)
+ return pk.GetScriptHash()
+}
+
+type stMock struct{}
+
+func (m *stMock) CurrentEpoch() uint64 {
+ return 8
+}
+
+func TestCheckAPE(t *testing.T) {
+ cid := cid.ID{}
+ _ = cid.DecodeString(containerID)
+
+ t.Run("treeID rule", func(t *testing.T) {
+ los := inmemory.NewInmemoryLocalStorage()
+ mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+ fid := newFrostfsIDProviderMock(t)
+ s := Service{
+ cfg: cfg{
+ frostfsidSubjectProvider: fid,
+ },
+ apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+ }
+
+ mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.QuotaLimitReached,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.ProperyKeyTreeID,
+ Value: versionTreeID,
+ },
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey())
+
+ var chErr *checkercore.ChainRouterError
+ require.ErrorAs(t, err, &chErr)
+ require.Equal(t, chain.QuotaLimitReached, chErr.Status())
+ })
+
+ t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
+ los := inmemory.NewInmemoryLocalStorage()
+ mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+ fid := newFrostfsIDProviderMock(t)
+ s := Service{
+ cfg: cfg{
+ frostfsidSubjectProvider: fid,
+ },
+ apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+ }
+
+ los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringNotEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.PropertyKeyObjectType,
+ Value: "TOMBSTONE",
+ },
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
+ require.NoError(t, err)
+ })
+
+ t.Run("delete rule won't affect tree add", func(t *testing.T) {
+ los := inmemory.NewInmemoryLocalStorage()
+ mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+ fid := newFrostfsIDProviderMock(t)
+ s := Service{
+ cfg: cfg{
+ frostfsidSubjectProvider: fid,
+ },
+ apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+ }
+
+ los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.AccessDenied,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+ Rules: []chain.Rule{
+ {
+ Status: chain.Allow,
+ Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: chain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+ },
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringNotEquals,
+ Kind: chain.KindResource,
+ Key: nativeschema.PropertyKeyObjectType,
+ Value: "TOMBSTONE",
+ },
+ },
+ },
+ },
+ MatchType: chain.MatchTypeFirstMatch,
+ })
+
+ err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
+ require.NoError(t, err)
+ })
+}
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index 73745e1b1b..a117007719 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -2,22 +2,24 @@ package tree
import (
"context"
+ "crypto/ecdsa"
"errors"
"fmt"
- "strings"
"sync"
"time"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials/insecure"
)
type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
+ key *ecdsa.PrivateKey
+ ds *internalNet.DialerSource
}
type cacheItem struct {
@@ -26,23 +28,27 @@ type cacheItem struct {
}
const (
- defaultClientCacheSize = 10
+ defaultClientCacheSize = 32
defaultClientConnectTimeout = time.Second * 2
defaultReconnectInterval = time.Second * 15
)
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init() {
- l, _ := simplelru.NewLRU[string, cacheItem](defaultClientCacheSize, func(_ string, value cacheItem) {
- _ = value.cc.Close()
+func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
+ l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
+ if conn := value.cc; conn != nil {
+ _ = conn.Close()
+ }
})
c.LRU = *l
+ c.key = pk
+ c.ds = ds
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
c.Lock()
- ccInt, ok := c.LRU.Get(netmapAddr)
+ ccInt, ok := c.Get(netmapAddr)
c.Unlock()
if ok {
@@ -60,14 +66,19 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- cc, err := dialTreeService(ctx, netmapAddr)
+ var netAddr network.Address
+ if err := netAddr.FromString(netmapAddr); err != nil {
+ return nil, err
+ }
+
+ cc, err := dialTreeService(ctx, netAddr, c.key, c.ds)
lastTry := time.Now()
c.Lock()
if err != nil {
- c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+ c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
} else {
- c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+ c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
}
c.Unlock()
@@ -77,24 +88,3 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
-
-func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
- var netAddr network.Address
- if err := netAddr.FromString(netmapAddr); err != nil {
- return nil, err
- }
-
- opts := make([]grpc.DialOption, 1, 2)
- opts[0] = grpc.WithBlock()
-
- // FIXME(@fyrchik): ugly hack #1322
- if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
- opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
- cancel()
-
- return cc, err
-}
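
Review note: the rewritten client cache keeps failed dials in the LRU too — a nil connection with a recent lastTry acts as a negative cache entry (surfacing as errRecentlyFailed), and the eviction callback closes any real connection that falls out. A reduced, map-based sketch of the negative-caching behaviour:

package main

import (
	"errors"
	"fmt"
	"time"
)

const reconnectInterval = 15 * time.Second

var errRecentlyFailed = errors.New("client has recently failed")

type conn struct{ addr string }

func (c *conn) Close() error { return nil }

type cacheItem struct {
	cc      *conn // nil means the last dial failed
	lastTry time.Time
}

type clientCache struct {
	items map[string]cacheItem
}

// get returns a cached healthy connection, refuses to redial a peer
// that failed recently, and records the outcome of a fresh dial.
func (c *clientCache) get(addr string, dial func(string) (*conn, error)) (*conn, error) {
	if it, ok := c.items[addr]; ok {
		if it.cc != nil {
			return it.cc, nil
		}
		if time.Since(it.lastTry) < reconnectInterval {
			return nil, errRecentlyFailed // negative cache entry
		}
	}
	cc, err := dial(addr)
	c.items[addr] = cacheItem{cc: cc, lastTry: time.Now()}
	return cc, err
}

func main() {
	c := &clientCache{items: make(map[string]cacheItem)}
	_, err := c.get("10.0.0.1:8080", func(string) (*conn, error) { return nil, errors.New("dial failed") })
	fmt.Println(err) // dial failed
	_, err = c.get("10.0.0.1:8080", func(string) (*conn, error) { return &conn{}, nil })
	fmt.Println(err) // client has recently failed
}
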
diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go
index 4352575500..c641a21a2c 100644
--- a/pkg/services/tree/container.go
+++ b/pkg/services/tree/container.go
@@ -2,6 +2,7 @@ package tree
import (
"bytes"
+ "context"
"crypto/sha256"
"fmt"
"sync"
@@ -32,13 +33,13 @@ type containerCacheItem struct {
const defaultContainerCacheSize = 10
// getContainerNodes returns the nodes in the container and the position of the local key in the list.
-func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
- nm, err := s.nmSource.GetNetMap(0)
+func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
+ nm, err := s.nmSource.GetNetMap(ctx, 0)
if err != nil {
return nil, -1, fmt.Errorf("can't get netmap: %w", err)
}
- cnr, err := s.cnrSource.Get(cid)
+ cnr, err := s.cnrSource.Get(ctx, cid)
if err != nil {
return nil, -1, fmt.Errorf("can't get container: %w", err)
}
diff --git a/pkg/services/tree/drop.go b/pkg/services/tree/drop.go
index c0750cbdcf..a9e4e2e714 100644
--- a/pkg/services/tree/drop.go
+++ b/pkg/services/tree/drop.go
@@ -7,8 +7,8 @@ import (
)
// DropTree drops a tree from the database. If treeID is empty, all the trees are dropped.
-func (s *Service) DropTree(_ context.Context, cid cid.ID, treeID string) error {
+func (s *Service) DropTree(ctx context.Context, cid cid.ID, treeID string) error {
// The only current use-case is a container removal, where all trees should be removed.
// Thus there is no need to replicate the operation to other nodes.
- return s.forest.TreeDrop(cid, treeID)
+ return s.forest.TreeDrop(ctx, cid, treeID)
}
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index 5feeace919..e7a13827ec 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -1,7 +1,11 @@
package tree
import (
+ "context"
"errors"
+ "path"
+ "path/filepath"
+ "slices"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -30,9 +34,10 @@ func TestGetSubTree(t *testing.T) {
for i := range tree {
path := tree[i].path
meta := []pilorama.KeyValue{
- {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])}}
+ {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])},
+ }
- lm, err := p.TreeAddByPath(d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta)
+ lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -41,9 +46,9 @@ func TestGetSubTree(t *testing.T) {
testGetSubTree := func(t *testing.T, rootID uint64, depth uint32, errIndex int) []uint64 {
acc := subTreeAcc{errIndex: errIndex}
- err := getSubTree(&acc, d.CID, &GetSubTreeRequest_Body{
+ err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
TreeId: treeID,
- RootId: rootID,
+ RootId: []uint64{rootID},
Depth: depth,
}, p)
if errIndex == -1 {
@@ -53,12 +58,12 @@ func TestGetSubTree(t *testing.T) {
}
// GetSubTree must return a child only after it has returned the parent.
- require.Equal(t, rootID, acc.seen[0].Body.NodeId)
+ require.Equal(t, rootID, acc.seen[0].Body.NodeId[0])
loop:
for i := 1; i < len(acc.seen); i++ {
parent := acc.seen[i].Body.ParentId
- for j := 0; j < i; j++ {
- if acc.seen[j].Body.NodeId == parent {
+ for j := range i {
+ if acc.seen[j].Body.NodeId[0] == parent[0] {
continue loop
}
}
@@ -68,16 +73,16 @@ func TestGetSubTree(t *testing.T) {
// GetSubTree must return valid meta.
for i := range acc.seen {
b := acc.seen[i].Body
- meta, node, err := p.TreeGetMeta(d.CID, treeID, b.NodeId)
+ meta, node, err := p.TreeGetMeta(context.Background(), d.CID, treeID, b.NodeId[0])
require.NoError(t, err)
- require.Equal(t, node, b.ParentId)
- require.Equal(t, meta.Time, b.Timestamp)
+ require.Equal(t, node, b.ParentId[0])
+ require.Equal(t, meta.Time, b.Timestamp[0])
require.Equal(t, metaToProto(meta.Items), b.Meta)
}
ordered := make([]uint64, len(acc.seen))
for i := range acc.seen {
- ordered[i] = acc.seen[i].Body.NodeId
+ ordered[i] = acc.seen[i].Body.NodeId[0]
}
return ordered
}
@@ -118,7 +123,112 @@ func TestGetSubTree(t *testing.T) {
})
}
-var errSubTreeSend = errors.New("test error")
+func TestGetSubTreeOrderAsc(t *testing.T) {
+ t.Run("memory forest", func(t *testing.T) {
+ testGetSubTreeOrderAsc(t, pilorama.NewMemoryForest())
+ })
+
+ t.Run("boltdb forest", func(t *testing.T) {
+ p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
+ require.NoError(t, p.Open(context.Background(), 0o644))
+ require.NoError(t, p.Init(context.Background()))
+ testGetSubTreeOrderAsc(t, p)
+ })
+}
+
+func testGetSubTreeOrderAsc(t *testing.T, p pilorama.ForestStorage) {
+ d := pilorama.CIDDescriptor{CID: cidtest.ID(), Size: 1}
+ treeID := "sometree"
+
+ tree := []struct {
+ path []string
+ id uint64
+ }{
+ {path: []string{"dir1"}},
+ {path: []string{"dir2"}},
+ {path: []string{"dir1", "sub1"}},
+ {path: []string{"dir2", "sub1"}},
+ {path: []string{"dir2", "sub2"}},
+ {path: []string{"dir2", "sub1", "subsub1"}},
+ }
+
+ for i := range tree {
+ path := tree[i].path
+ meta := []pilorama.KeyValue{
+ {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])},
+ }
+
+ lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(lm))
+ tree[i].id = lm[0].Child
+ }
+
+ t.Run("total", func(t *testing.T) {
+ t.Skip()
+ acc := subTreeAcc{errIndex: -1}
+ err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
+ TreeId: treeID,
+ OrderBy: &GetSubTreeRequest_Body_Order{
+ Direction: GetSubTreeRequest_Body_Order_Asc,
+ },
+ }, p)
+ require.NoError(t, err)
+ // GetSubTree must return a child only after it has returned the parent.
+ require.Equal(t, uint64(0), acc.seen[0].Body.NodeId[0])
+
+ paths := make([]string, 0, len(acc.seen))
+ for i := range acc.seen {
+ if i == 0 {
+ continue
+ }
+ found := false
+ for j := range tree {
+ if acc.seen[i].Body.NodeId[0] == tree[j].id {
+ found = true
+ paths = append(paths, path.Join(tree[j].path...))
+ }
+ }
+ require.True(t, found, "unknown node %d %v", i, acc.seen[i].GetBody().GetNodeId())
+ }
+
+ require.True(t, slices.IsSorted(paths))
+ })
+ t.Run("depth=1", func(t *testing.T) {
+ acc := subTreeAcc{errIndex: -1}
+ err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
+ TreeId: treeID,
+ Depth: 1,
+ OrderBy: &GetSubTreeRequest_Body_Order{
+ Direction: GetSubTreeRequest_Body_Order_Asc,
+ },
+ }, p)
+ require.NoError(t, err)
+ require.Len(t, acc.seen, 1)
+ require.Equal(t, uint64(0), acc.seen[0].Body.NodeId[0])
+ })
+ t.Run("depth=2", func(t *testing.T) {
+ acc := subTreeAcc{errIndex: -1}
+ err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
+ TreeId: treeID,
+ Depth: 2,
+ OrderBy: &GetSubTreeRequest_Body_Order{
+ Direction: GetSubTreeRequest_Body_Order_Asc,
+ },
+ }, p)
+ require.NoError(t, err)
+ require.Len(t, acc.seen, 3)
+ require.Equal(t, uint64(0), acc.seen[0].Body.NodeId[0])
+ require.Equal(t, uint64(0), acc.seen[1].GetBody().GetParentId()[0])
+ require.Equal(t, uint64(0), acc.seen[2].GetBody().GetParentId()[0])
+ })
+}
+
+var (
+ errSubTreeSend = errors.New("send finished with error")
+ errSubTreeSendAfterError = errors.New("send was invoked after an error occurred")
+ errInvalidResponse = errors.New("send got invalid response")
+)
type subTreeAcc struct {
grpc.ServerStream // to satisfy the interface
@@ -127,14 +237,26 @@ type subTreeAcc struct {
errIndex int
}
+var _ TreeService_GetSubTreeServer = &subTreeAcc{}
+
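+// Send records the response and verifies that every ID field carries at most
+// one element, i.e. that the server keeps the single-node response format.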
func (s *subTreeAcc) Send(r *GetSubTreeResponse) error {
+ b := r.GetBody()
+ if len(b.GetNodeId()) > 1 {
+ return errInvalidResponse
+ }
+ if len(b.GetParentId()) > 1 {
+ return errInvalidResponse
+ }
+ if len(b.GetTimestamp()) > 1 {
+ return errInvalidResponse
+ }
s.seen = append(s.seen, r)
if s.errIndex >= 0 {
if len(s.seen) == s.errIndex+1 {
return errSubTreeSend
}
if s.errIndex >= 0 && len(s.seen) > s.errIndex {
- panic("called Send after an error was returned")
+ return errSubTreeSendAfterError
}
}
return nil
diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go
new file mode 100644
index 0000000000..07503f8c3c
--- /dev/null
+++ b/pkg/services/tree/metrics.go
@@ -0,0 +1,17 @@
+package tree
+
+import "time"
+
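+// MetricsRegister accumulates tree-service metrics: replication task and wait
+// durations, sync durations (each with a success flag), and per-operation
+// counts keyed by method name and IO tag.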
+type MetricsRegister interface {
+ AddReplicateTaskDuration(time.Duration, bool)
+ AddReplicateWaitDuration(time.Duration, bool)
+ AddSyncDuration(time.Duration, bool)
+ AddOperation(string, string)
+}
+
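+// defaultMetricsRegister is a no-op implementation used unless the service is
+// constructed with the WithMetrics option.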
+type defaultMetricsRegister struct{}
+
+func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {}
+func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {}
+func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {}
+func (defaultMetricsRegister) AddOperation(string, string) {}
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index d60bc14c53..a28651452d 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -1,38 +1,56 @@
package tree
import (
+ "context"
"crypto/ecdsa"
+ "sync/atomic"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type ContainerSource interface {
container.Source
+
+ DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error)
+
// List must return a list of all the containers in the FrostFS network
// at the moment of a call and any error that does not allow fetching
// container information.
- List() ([]cid.ID, error)
+ List(ctx context.Context) ([]cid.ID, error)
}
type cfg struct {
- log *logger.Logger
- key *ecdsa.PrivateKey
- rawPub []byte
- nmSource netmap.Source
- cnrSource ContainerSource
- eaclSource container.EACLSource
- forest pilorama.Forest
+ log *logger.Logger
+ key *ecdsa.PrivateKey
+ rawPub []byte
+ state netmap.State
+ nmSource netmap.Source
+ cnrSource ContainerSource
+ frostfsidSubjectProvider frostfsidcore.SubjectProvider
+ forest pilorama.Forest
// replication-related parameters
replicatorChannelCapacity int
replicatorWorkerCount int
replicatorTimeout time.Duration
containerCacheSize int
+ authorizedKeys atomic.Pointer[[][]byte]
+ syncBatchSize int
+ syncDisabled bool
+
+ localOverrideStorage policyengine.LocalOverrideStorage
+ morphChainStorage policyengine.MorphRuleChainStorageReader
+
+ metrics MetricsRegister
+ ds *net.DialerSource
}
// Option represents a configuration option for the tree service.
@@ -46,11 +64,9 @@ func WithContainerSource(src ContainerSource) Option {
}
}
-// WithEACLSource sets a eACL table source for a tree service.
-// This option is required.
-func WithEACLSource(src container.EACLSource) Option {
+func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option {
return func(c *cfg) {
- c.eaclSource = src
+ c.frostfsidSubjectProvider = provider
}
}
@@ -101,6 +117,18 @@ func WithReplicationWorkerCount(n int) Option {
}
}
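+// WithSyncBatchSize overrides the default batch size used during tree
+// synchronization.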
+func WithSyncBatchSize(n int) Option {
+ return func(c *cfg) {
+ c.syncBatchSize = n
+ }
+}
+
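+// WithSyncDisabled disables background tree synchronization; the service then
+// reports the initial sync as done immediately on start.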
+func WithSyncDisabled(d bool) Option {
+ return func(c *cfg) {
+ c.syncDisabled = d
+ }
+}
+
func WithContainerCacheSize(n int) Option {
return func(c *cfg) {
if n > 0 {
@@ -116,3 +144,41 @@ func WithReplicationTimeout(t time.Duration) Option {
}
}
}
+
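+// WithMetrics sets the metrics register for the tree service.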
+func WithMetrics(v MetricsRegister) Option {
+ return func(c *cfg) {
+ c.metrics = v
+ }
+}
+
+// WithAuthorizedKeys returns an option that sets the list of public
+// keys allowed to use the Tree service.
+func WithAuthorizedKeys(keys keys.PublicKeys) Option {
+ return func(c *cfg) {
+ c.authorizedKeys.Store(fromPublicKeys(keys))
+ }
+}
+
+func WithAPELocalOverrideStorage(localOverrideStorage policyengine.LocalOverrideStorage) Option {
+ return func(c *cfg) {
+ c.localOverrideStorage = localOverrideStorage
+ }
+}
+
+func WithAPEMorphRuleStorage(morphRuleStorage policyengine.MorphRuleChainStorageReader) Option {
+ return func(c *cfg) {
+ c.morphChainStorage = morphRuleStorage
+ }
+}
+
+func WithNetmapState(state netmap.State) Option {
+ return func(c *cfg) {
+ c.state = state
+ }
+}
+
+func WithDialerSource(ds *net.DialerSource) Option {
+ return func(c *cfg) {
+ c.ds = ds
+ }
+}
diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go
new file mode 100644
index 0000000000..8f21686df8
--- /dev/null
+++ b/pkg/services/tree/qos.go
@@ -0,0 +1,101 @@
+package tree
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+)
+
+var _ TreeServiceServer = (*ioTagAdjust)(nil)
+
+type AdjustIOTag interface {
+ AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
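+// ioTagAdjust decorates a TreeServiceServer: every handler first adjusts the
+// context IO tag based on the public key that signed the request.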
+type ioTagAdjust struct {
+ s TreeServiceServer
+ a AdjustIOTag
+}
+
+func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer {
+ return &ioTagAdjust{
+ s: s,
+ a: a,
+ }
+}
+
+func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Add(ctx, req)
+}
+
+func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.AddByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Apply(ctx, req)
+}
+
+func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.GetNodeByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
+ ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+ return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{
+ sender: srv,
+ ServerStream: srv,
+ ctxF: func() context.Context { return ctx },
+ })
+}
+
+func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
+ ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+ return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{
+ sender: srv,
+ ServerStream: srv,
+ ctxF: func() context.Context { return ctx },
+ })
+}
+
+func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Healthcheck(ctx, req)
+}
+
+func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Move(ctx, req)
+}
+
+func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.Remove(ctx, req)
+}
+
+func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
+ ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+ return i.s.TreeList(ctx, req)
+}
+
+type qosSend[T any] interface {
+ Send(T) error
+}
+
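+// qosServerWrapper substitutes the stream context so that handlers observe
+// the adjusted IO tag while sends are delegated to the original stream.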
+type qosServerWrapper[T any] struct {
+ grpc.ServerStream
+ sender qosSend[T]
+ ctxF func() context.Context
+}
+
+func (w *qosServerWrapper[T]) Send(resp T) error {
+ return w.sender.Send(resp)
+}
+
+func (w *qosServerWrapper[T]) Context() context.Context {
+ return w.ctxF()
+}
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 1671d25111..647f8cb301 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -5,15 +5,33 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
+ "google.golang.org/grpc"
)
var errNoSuitableNode = errors.New("no node was found to execute the request")
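+// relayUnary forwards a unary request to the first reachable node in the
+// container and returns that node's response and error.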
+func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) {
+ var resp *Resp
+ var outErr error
+ err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ resp, outErr = callback(c, fCtx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
+}
+
// forEachNode executes the callback for each node in the container until true is returned.
// Returns errNoSuitableNode if there was no successful attempt to dial any node.
-func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error {
+func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error {
for _, n := range cntNodes {
if bytes.Equal(n.PublicKey(), s.rawPub) {
return nil
@@ -23,17 +41,15 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
var called bool
for _, n := range cntNodes {
var stop bool
- n.IterateNetworkEndpoints(func(endpoint string) bool {
- c, err := s.cache.get(ctx, endpoint)
- if err != nil {
- return false
+ for endpoint := range n.NetworkEndpoints() {
+ stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool {
+ called = true
+ return f(fCtx, c)
+ })
+ if called {
+ break
}
-
- s.log.Debug("redirecting tree service query", zap.String("endpoint", endpoint))
- called = true
- stop = f(c)
- return true
- })
+ }
if stop {
return nil
}
@@ -43,3 +59,19 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
}
return nil
}
+
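+// execOnClient resolves a cached client for the endpoint and runs the callback
+// under a tracing span; it reports false if no client could be obtained.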
+func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
+ trace.WithAttributes(
+ attribute.String("endpoint", endpoint),
+ ))
+ defer span.End()
+
+ c, err := s.cache.get(ctx, endpoint)
+ if err != nil {
+ return false
+ }
+
+ s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
+ return f(ctx, c)
+}
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index 0c3c35f7b5..bc6e26fa7e 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -1,6 +1,7 @@
package tree
import (
+ "bytes"
"context"
"crypto/sha256"
"encoding/hex"
@@ -8,9 +9,13 @@ import (
"fmt"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -18,6 +23,7 @@ type movePair struct {
cid cidSDK.ID
treeID string
op *pilorama.Move
+ excPub []byte
}
type replicationTask struct {
@@ -27,7 +33,7 @@ type replicationTask struct {
type applyOp struct {
treeID string
- pilorama.CIDDescriptor
+ cid cidSDK.ID
pilorama.Move
}
@@ -35,67 +41,105 @@ const (
defaultReplicatorCapacity = 64
defaultReplicatorWorkerCount = 64
defaultReplicatorSendTimeout = time.Second * 5
+ defaultSyncBatchSize = 1000
)
-func (s *Service) localReplicationWorker() {
+func (s *Service) localReplicationWorker(ctx context.Context) {
for {
select {
case <-s.closeCh:
return
case op := <-s.replicateLocalCh:
- err := s.forest.TreeApply(op.CIDDescriptor, op.treeID, &op.Move, false)
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationOperation",
+ trace.WithAttributes(
+ attribute.String("tree_id", op.treeID),
+ attribute.String("container_id", op.cid.EncodeToString()),
+ ),
+ )
+
+ err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
if err != nil {
- s.log.Error("failed to apply replicated operation",
- zap.String("err", err.Error()))
+ s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
+ zap.Error(err))
}
+ span.End()
}
}
}
-func (s *Service) replicationWorker() {
+func (s *Service) replicationWorker(ctx context.Context) {
for {
select {
case <-s.closeCh:
return
case task := <-s.replicationTasks:
- var lastErr error
- var lastAddr string
-
- task.n.IterateNetworkEndpoints(func(addr string) bool {
- lastAddr = addr
-
- c, err := s.cache.get(context.Background(), addr)
- if err != nil {
- lastErr = fmt.Errorf("can't create client: %w", err)
- return false
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), s.replicatorTimeout)
- _, lastErr = c.Apply(ctx, task.req)
- cancel()
-
- return lastErr == nil
- })
-
- if lastErr != nil {
- if errors.Is(lastErr, errRecentlyFailed) {
- s.log.Debug("do not send update to the node",
- zap.String("last_error", lastErr.Error()))
- } else {
- s.log.Warn("failed to sent update to the node",
- zap.String("last_error", lastErr.Error()),
- zap.String("address", lastAddr),
- zap.String("key", hex.EncodeToString(task.n.PublicKey())))
- }
- }
+ _ = s.ReplicateTreeOp(ctx, task.n, task.req)
}
}
}
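+// ReplicateTreeOp sends the signed Apply request to the given node, trying its
+// network endpoints in turn, and records the replication task duration metric.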
+func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req *ApplyRequest) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTask",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
+ ),
+ )
+ defer span.End()
+
+ start := time.Now()
+
+ var lastErr error
+ var lastAddr string
+
+ for addr := range n.NetworkEndpoints() {
+ lastAddr = addr
+ lastErr = s.apply(ctx, n, addr, req)
+ if lastErr == nil {
+ break
+ }
+ }
+
+ if lastErr != nil {
+ if errors.Is(lastErr, errRecentlyFailed) {
+ s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()))
+ } else {
+ s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()),
+ zap.String("address", lastAddr),
+ zap.String("key", hex.EncodeToString(n.PublicKey())))
+ }
+ s.metrics.AddReplicateTaskDuration(time.Since(start), false)
+ return lastErr
+ }
+ s.metrics.AddReplicateTaskDuration(time.Since(start), true)
+ return nil
+}
+
+func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
+ attribute.String("address", addr),
+ ),
+ )
+ defer span.End()
+
+ c, err := s.cache.get(ctx, addr)
+ if err != nil {
+ return fmt.Errorf("can't create client: %w", err)
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
+ _, err = c.Apply(ctx, req)
+ cancel()
+ return err
+}
+
func (s *Service) replicateLoop(ctx context.Context) {
- for i := 0; i < s.replicatorWorkerCount; i++ {
- go s.replicationWorker()
- go s.localReplicationWorker()
+ for range s.replicatorWorkerCount {
+ go s.replicationWorker(ctx)
+ go s.localReplicationWorker(ctx)
}
defer func() {
for len(s.replicationTasks) != 0 {
@@ -110,43 +154,74 @@ func (s *Service) replicateLoop(ctx context.Context) {
case <-ctx.Done():
return
case op := <-s.replicateCh:
- err := s.replicate(op)
+ start := time.Now()
+ err := s.replicate(ctx, op)
if err != nil {
- s.log.Error("error during replication",
- zap.String("err", err.Error()),
+ s.log.Error(ctx, logs.TreeErrorDuringReplication,
+ zap.Error(err),
zap.Stringer("cid", op.cid),
zap.String("treeID", op.treeID))
}
+ s.metrics.AddReplicateWaitDuration(time.Since(start), err == nil)
}
}
}
-func (s *Service) replicate(op movePair) error {
+func (s *Service) replicate(ctx context.Context, op movePair) error {
req := newApplyRequest(&op)
err := SignMessage(req, s.key)
if err != nil {
return fmt.Errorf("can't sign data: %w", err)
}
- nodes, localIndex, err := s.getContainerNodes(op.cid)
+ nodes, localIndex, err := s.getContainerNodes(ctx, op.cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
for i := range nodes {
- if i != localIndex {
+ if i != localIndex && !bytes.Equal(nodes[i].PublicKey(), op.excPub) {
s.replicationTasks <- replicationTask{nodes[i], req}
}
}
return nil
}
-func (s *Service) pushToQueue(cid cidSDK.ID, treeID string, op *pilorama.Move) {
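+// replicateToRemoteNode synchronously replicates the operation to the first
+// remote node that accepts it and returns that node's public key, so that the
+// asynchronous queue can skip it; errors from failed nodes are joined.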
+func (s *Service) replicateToRemoteNode(ctx context.Context, op movePair,
+ nodes []netmapSDK.NodeInfo, localIndex int,
+) ([]byte, error) {
+ req := newApplyRequest(&op)
+ err := SignMessage(req, s.key)
+ if err != nil {
+ return nil, fmt.Errorf("can't sign data: %w", err)
+ }
+
+ var errMulti error
+ for i := range nodes {
+ if i != localIndex {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ err := s.ReplicateTreeOp(ctx, nodes[i], req)
+ if err != nil {
+ errMulti = errors.Join(errMulti, err)
+ continue
+ }
+ return nodes[i].PublicKey(), nil
+ }
+ }
+ return nil, errMulti
+}
+
+func (s *Service) pushToQueue(cid cidSDK.ID, treeID string, op *pilorama.Move, excPub []byte) {
select {
case s.replicateCh <- movePair{
cid: cid,
treeID: treeID,
op: op,
+ excPub: excPub,
}:
default:
}
@@ -162,7 +237,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
TreeId: op.treeID,
Operation: &LogMove{
ParentId: op.op.Parent,
- Meta: op.op.Meta.Bytes(),
+ Meta: op.op.Bytes(),
ChildId: op.op.Child,
},
},
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 3176858e2b..81aa98b4d8 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -5,15 +5,23 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"sync"
+ "sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// Service represents tree-service capable of working with multiple
@@ -31,6 +39,10 @@ type Service struct {
syncChan chan struct{}
syncPool *ants.Pool
+ initialSyncDone atomic.Bool
+
+ apeChecker checkercore.CheckCore
+
// cnrMap contains existing (used) container IDs.
cnrMap map[cidSDK.ID]struct{}
// cnrMapMtx protects cnrMap
@@ -46,16 +58,19 @@ func New(opts ...Option) *Service {
s.replicatorChannelCapacity = defaultReplicatorCapacity
s.replicatorWorkerCount = defaultReplicatorWorkerCount
s.replicatorTimeout = defaultReplicatorSendTimeout
+ s.syncBatchSize = defaultSyncBatchSize
+ s.metrics = defaultMetricsRegister{}
+ s.authorizedKeys.Store(&[][]byte{})
for i := range opts {
opts[i](&s.cfg)
}
if s.log == nil {
- s.log = &logger.Logger{Logger: zap.NewNop()}
+ s.log = logger.NewLoggerWrapper(zap.NewNop())
}
- s.cache.init()
+ s.cache.init(s.key, s.ds)
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
@@ -65,12 +80,19 @@ func New(opts ...Option) *Service {
s.syncChan = make(chan struct{})
s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount)
+ s.apeChecker = checkercore.New(s.localOverrideStorage, s.morphChainStorage, s.frostfsidSubjectProvider, s.state)
+
return &s
}
// Start starts the service.
func (s *Service) Start(ctx context.Context) {
+ ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
go s.replicateLoop(ctx)
+ if s.syncDisabled {
+ s.initialSyncDone.Store(true)
+ return
+ }
go s.syncLoop(ctx)
select {
@@ -89,6 +111,11 @@ func (s *Service) Shutdown() {
}
func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
+ defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx))
+ if !s.initialSyncDone.Load() {
+ return nil, ErrAlreadySyncing
+ }
+
b := req.GetBody()
var cid cidSDK.ID
@@ -96,30 +123,21 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
return nil, err
}
- err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *AddResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.Add(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).Add)
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{
+ log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{
Parent: b.GetParentId(),
Child: pilorama.RootID,
Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())},
@@ -128,7 +146,16 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
return nil, err
}
- s.pushToQueue(cid, b.GetTreeId(), log)
+ excPub, err := s.replicateToRemoteNode(ctx, movePair{
+ cid: cid,
+ treeID: b.GetTreeId(),
+ op: log,
+ }, ns, pos)
+ if err != nil {
+ return nil, err
+ }
+ s.pushToQueue(cid, b.GetTreeId(), log, excPub)
+
return &AddResponse{
Body: &AddResponse_Body{
NodeId: log.Child,
@@ -137,6 +164,11 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
}
func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
+ defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx))
+ if !s.initialSyncDone.Load() {
+ return nil, ErrAlreadySyncing
+ }
+
b := req.GetBody()
var cid cidSDK.ID
@@ -144,26 +176,17 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
return nil, err
}
- err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *AddByPathResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.AddByPath(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).AddByPath)
}
meta := protoToMeta(b.GetMeta())
@@ -174,13 +197,21 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- logs, err := s.forest.TreeAddByPath(d, b.GetTreeId(), attr, b.GetPath(), meta)
+ logs, err := s.forest.TreeAddByPath(ctx, d, b.GetTreeId(), attr, b.GetPath(), meta)
if err != nil {
return nil, err
}
for i := range logs {
- s.pushToQueue(cid, b.GetTreeId(), &logs[i])
+ excPub, err := s.replicateToRemoteNode(ctx, movePair{
+ cid: cid,
+ treeID: b.GetTreeId(),
+ op: &logs[i],
+ }, ns, pos)
+ if err != nil {
+ return nil, err
+ }
+ s.pushToQueue(cid, b.GetTreeId(), &logs[i], excPub)
}
nodes := make([]uint64, len(logs))
@@ -197,6 +228,11 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
+ defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx))
+ if !s.initialSyncDone.Load() {
+ return nil, ErrAlreadySyncing
+ }
+
b := req.GetBody()
var cid cidSDK.ID
@@ -204,26 +240,17 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
return nil, err
}
- err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *RemoveResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.Remove(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).Remove)
}
if b.GetNodeId() == pilorama.RootID {
@@ -231,7 +258,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{
+ log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{
Parent: pilorama.TrashID,
Child: b.GetNodeId(),
})
@@ -239,13 +266,26 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
return nil, err
}
- s.pushToQueue(cid, b.GetTreeId(), log)
+ excPub, err := s.replicateToRemoteNode(ctx, movePair{
+ cid: cid,
+ treeID: b.GetTreeId(),
+ op: log,
+ }, ns, pos)
+ if err != nil {
+ return nil, err
+ }
+ s.pushToQueue(cid, b.GetTreeId(), log, excPub)
return new(RemoveResponse), nil
}
// Move applies a client operation to the specified tree and pushes it into the
// queue for replication on other nodes.
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
+ defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx))
+ if !s.initialSyncDone.Load() {
+ return nil, ErrAlreadySyncing
+ }
+
b := req.GetBody()
var cid cidSDK.ID
@@ -253,26 +293,17 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
return nil, err
}
- err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *MoveResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.Move(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).Move)
}
if b.GetNodeId() == pilorama.RootID {
@@ -280,7 +311,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{
+ log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{
Parent: b.GetParentId(),
Child: b.GetNodeId(),
Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())},
@@ -289,11 +320,24 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
return nil, err
}
- s.pushToQueue(cid, b.GetTreeId(), log)
+ excPub, err := s.replicateToRemoteNode(ctx, movePair{
+ cid: cid,
+ treeID: b.GetTreeId(),
+ op: log,
+ }, ns, pos)
+ if err != nil {
+ return nil, err
+ }
+ s.pushToQueue(cid, b.GetTreeId(), log, excPub)
return new(MoveResponse), nil
}
func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
+ defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx))
+ if !s.initialSyncDone.Load() {
+ return nil, ErrAlreadySyncing
+ }
+
b := req.GetBody()
var cid cidSDK.ID
@@ -301,26 +345,17 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *GetNodeByPathResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.GetNodeByPath(ctx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).GetNodeByPath)
}
attr := b.GetPathAttribute()
@@ -328,14 +363,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
attr = pilorama.AttributeFilename
}
- nodes, err := s.forest.TreeGetByPath(cid, b.GetTreeId(), attr, b.GetPath(), b.GetLatestOnly())
+ nodes, err := s.forest.TreeGetByPath(ctx, cid, b.GetTreeId(), attr, b.GetPath(), b.GetLatestOnly())
if err != nil {
return nil, err
}
- info := make([]*GetNodeByPathResponse_Info, 0, len(nodes))
+ info := make([]GetNodeByPathResponse_Info, 0, len(nodes))
for _, node := range nodes {
- m, parent, err := s.forest.TreeGetMeta(cid, b.GetTreeId(), node)
+ m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node)
if err != nil {
return nil, err
}
@@ -344,22 +379,21 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
x.ParentId = parent
x.NodeId = node
x.Timestamp = m.Time
- if b.AllAttributes {
+ if b.GetAllAttributes() {
x.Meta = metaToProto(m.Items)
} else {
+ var metaValue []KeyValue
for _, kv := range m.Items {
- for _, attr := range b.GetAttributes() {
- if kv.Key == attr {
- x.Meta = append(x.Meta, &KeyValue{
- Key: kv.Key,
- Value: kv.Value,
- })
- break
- }
+ if slices.Contains(b.GetAttributes(), kv.Key) {
+ metaValue = append(metaValue, KeyValue{
+ Key: kv.Key,
+ Value: kv.Value,
+ })
}
}
+ x.Meta = metaValue
}
- info = append(info, &x)
+ info = append(info, x)
}
return &GetNodeByPathResponse{
@@ -370,6 +404,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
+ defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context()))
+ if !s.initialSyncDone.Load() {
+ return ErrAlreadySyncing
+ }
+
b := req.GetBody()
var cid cidSDK.ID
@@ -377,20 +416,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return err
}
- err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(srv.Context(), cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetSubTreeClient
var outErr error
- err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
- cli, outErr = c.GetSubTree(srv.Context(), req)
+ err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ cli, outErr = c.GetSubTree(fCtx, req)
return true
})
if err != nil {
@@ -406,13 +445,130 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return nil
}
- return getSubTree(srv, cid, b, s.forest)
+ return getSubTree(srv.Context(), srv, cid, b, s.forest)
}
-func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
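+// stackItem is a DFS frame of the sorted traversal: a batch of child nodes,
+// the multi-node parent they belong to, and a cursor for the next batch.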
+type stackItem struct {
+ values []pilorama.MultiNodeInfo
+ parent pilorama.MultiNode
+ last *pilorama.Cursor
+}
+
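+// getSortedSubTree streams the subtree with children ordered by filename in
+// ascending order, fetching them from the forest in fixed-size batches.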
+func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
+ const batchSize = 1000
+
+ // For backward compatibility.
+ rootIDs := b.GetRootId()
+ if len(rootIDs) == 0 {
+ rootIDs = []uint64{0}
+ }
+
// Traverse the tree in a DFS manner. Because we need to support arbitrary depth,
// a recursive implementation is not suitable here, so we maintain an explicit stack.
- stack := [][]uint64{{b.GetRootId()}}
+ var ms []pilorama.KeyValue
+ var ps []uint64
+ var ts []uint64
+ for _, rootID := range rootIDs {
+ m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), rootID)
+ if err != nil {
+ return err
+ }
+ if ms == nil {
+ ms = m.Items
+ } else if len(m.Items) != 1 {
+ return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
+ }
+ ts = append(ts, m.Time)
+ ps = append(ps, p)
+ }
+
+ stack := []stackItem{{
+ values: []pilorama.MultiNodeInfo{{
+ Children: rootIDs,
+ Timestamps: ts,
+ Meta: ms,
+ Parents: ps,
+ }},
+ parent: ps,
+ }}
+
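+ // Refill the top frame from the forest when it runs out of nodes; otherwise
+ // pop and send one node per iteration and descend into its children while
+ // the requested depth allows.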
+ for {
+ if len(stack) == 0 {
+ break
+ } else if item := &stack[len(stack)-1]; len(item.values) == 0 {
+ if len(stack) == 1 {
+ break
+ }
+
+ var err error
+ item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
+ if err != nil {
+ return err
+ }
+
+ if len(item.values) == 0 {
+ stack = stack[:len(stack)-1]
+ continue
+ }
+ }
+
+ node, err := stackPopAndSend(stack, srv)
+ if err != nil {
+ return err
+ }
+
+ if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() {
+ children, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), node.Children, nil, batchSize)
+ if err != nil {
+ return err
+ }
+ if len(children) != 0 {
+ stack = append(stack, stackItem{
+ values: children,
+ parent: node.Children,
+ last: last,
+ })
+ }
+ }
+ }
+ return nil
+}
+
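+// stackPopAndSend pops the next node from the top stack frame and streams it
+// to the client in the multi-ID response format.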
+func stackPopAndSend(stack []stackItem, srv TreeService_GetSubTreeServer) (pilorama.MultiNodeInfo, error) {
+ node := stack[len(stack)-1].values[0]
+ stack[len(stack)-1].values = stack[len(stack)-1].values[1:]
+
+ return node, srv.Send(&GetSubTreeResponse{
+ Body: &GetSubTreeResponse_Body{
+ NodeId: node.Children,
+ ParentId: node.Parents,
+ Timestamp: node.Timestamps,
+ Meta: metaToProto(node.Meta),
+ },
+ })
+}
+
+func getSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
+ if b.GetOrderBy().GetDirection() == GetSubTreeRequest_Body_Order_Asc {
+ return getSortedSubTree(ctx, srv, cid, b, forest)
+ }
+
+ var rootID uint64
+ if len(b.GetRootId()) > 0 {
+ rootID = b.GetRootId()[0]
+ }
+
+ // Traverse the tree in a DFS manner. Because we need to support arbitrary depth,
+ // a recursive implementation is not suitable here, so we maintain an explicit stack.
+ m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), rootID)
+ if err != nil {
+ return err
+ }
+ stack := [][]pilorama.NodeInfo{{{
+ ID: rootID,
+ Meta: m,
+ ParentID: p,
+ }}}
for {
if len(stack) == 0 {
@@ -422,19 +578,15 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe
continue
}
- nodeID := stack[len(stack)-1][0]
+ node := stack[len(stack)-1][0]
stack[len(stack)-1] = stack[len(stack)-1][1:]
- m, p, err := forest.TreeGetMeta(cid, b.GetTreeId(), nodeID)
- if err != nil {
- return err
- }
err = srv.Send(&GetSubTreeResponse{
Body: &GetSubTreeResponse_Body{
- NodeId: nodeID,
- ParentId: p,
- Timestamp: m.Time,
- Meta: metaToProto(m.Items),
+ NodeId: []uint64{node.ID},
+ ParentId: []uint64{node.ParentID},
+ Timestamp: []uint64{node.Meta.Time},
+ Meta: metaToProto(node.Meta.Items),
},
})
if err != nil {
@@ -442,7 +594,11 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe
}
if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() {
- children, err := forest.TreeGetChildren(cid, b.GetTreeId(), nodeID)
+ children, err := forest.TreeGetChildren(ctx, cid, b.GetTreeId(), node.ID)
+ if err != nil {
+ return err
+ }
+ children, err = sortByFilename(children, b.GetOrderBy().GetDirection())
if err != nil {
return err
}
@@ -454,8 +610,26 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe
return nil
}
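+// sortByFilename orders nodes by their FileName attribute; only the None and
+// Asc directions are supported.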
+func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Direction) ([]pilorama.NodeInfo, error) {
+ switch d {
+ case GetSubTreeRequest_Body_Order_None:
+ return nodes, nil
+ case GetSubTreeRequest_Body_Order_Asc:
+ if len(nodes) == 0 {
+ return nodes, nil
+ }
+ slices.SortFunc(nodes, func(a, b pilorama.NodeInfo) int {
+ return bytes.Compare(a.Meta.GetAttr(pilorama.AttributeFilename), b.Meta.GetAttr(pilorama.AttributeFilename))
+ })
+ return nodes, nil
+ default:
+ return nil, fmt.Errorf("unsupported order direction: %s", d.String())
+ }
+}
+
// Apply locally applies operation from the remote node to the tree.
-func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+ defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx))
err := verifyMessage(req)
if err != nil {
return nil, err
@@ -468,7 +642,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
key := req.GetSignature().GetKey()
- _, pos, size, err := s.getContainerInfo(cid, key)
+ _, pos, _, err := s.getContainerInfo(ctx, cid, key)
if err != nil {
return nil, err
}
@@ -485,8 +659,8 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
select {
case s.replicateLocalCh <- applyOp{
- treeID: req.GetBody().GetTreeId(),
- CIDDescriptor: pilorama.CIDDescriptor{CID: cid, Position: pos, Size: size},
+ treeID: req.GetBody().GetTreeId(),
+ cid: cid,
Move: pilorama.Move{
Parent: op.GetParentId(),
Child: op.GetChildId(),
@@ -499,6 +673,11 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
}
func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
+ defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context()))
+ if !s.initialSyncDone.Load() {
+ return ErrAlreadySyncing
+ }
+
b := req.GetBody()
var cid cidSDK.ID
@@ -506,15 +685,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
return err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(srv.Context(), cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetOpLogClient
var outErr error
- err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
- cli, outErr = c.GetOpLog(srv.Context(), req)
+ err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
+ cli, outErr = c.GetOpLog(fCtx, req)
return true
})
if err != nil {
@@ -531,9 +710,13 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
}
h := b.GetHeight()
+ lastHeight, err := s.forest.TreeHeight(srv.Context(), cid, b.GetTreeId())
+ if err != nil {
+ return err
+ }
for {
- lm, err := s.forest.TreeGetOpLog(cid, b.GetTreeId(), h)
- if err != nil || lm.Time == 0 {
+ lm, err := s.forest.TreeGetOpLog(srv.Context(), cid, b.GetTreeId(), h)
+ if err != nil || lm.Time == 0 || lastHeight < lm.Time {
return err
}
@@ -541,7 +724,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
Body: &GetOpLogResponse_Body{
Operation: &LogMove{
ParentId: lm.Parent,
- Meta: lm.Meta.Bytes(),
+ Meta: lm.Bytes(),
ChildId: lm.Child,
},
},
@@ -555,6 +738,11 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
}
func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
+ defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx))
+ if !s.initialSyncDone.Load() {
+ return nil, ErrAlreadySyncing
+ }
+
var cid cidSDK.ID
err := cid.Decode(req.GetBody().GetContainerId())
@@ -570,24 +758,15 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
return nil, err
}
- ns, pos, err := s.getContainerNodes(cid)
+ ns, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, err
}
if pos < 0 {
- var resp *TreeListResponse
- var outErr error
- err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
- resp, outErr = c.TreeList(ctx, req)
- return outErr == nil
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
+ return relayUnary(ctx, s, ns, req, (TreeServiceClient).TreeList)
}
- ids, err := s.forest.TreeList(cid)
+ ids, err := s.forest.TreeList(ctx, cid)
if err != nil {
return nil, err
}
@@ -599,21 +778,19 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
}, nil
}
-func protoToMeta(arr []*KeyValue) []pilorama.KeyValue {
+func protoToMeta(arr []KeyValue) []pilorama.KeyValue {
meta := make([]pilorama.KeyValue, len(arr))
for i, kv := range arr {
- if kv != nil {
- meta[i].Key = kv.Key
- meta[i].Value = kv.Value
- }
+ meta[i].Key = kv.GetKey()
+ meta[i].Value = kv.GetValue()
}
return meta
}
-func metaToProto(arr []pilorama.KeyValue) []*KeyValue {
- meta := make([]*KeyValue, len(arr))
+func metaToProto(arr []pilorama.KeyValue) []KeyValue {
+ meta := make([]KeyValue, len(arr))
for i, kv := range arr {
- meta[i] = &KeyValue{
+ meta[i] = KeyValue{
Key: kv.Key,
Value: kv.Value,
}
@@ -623,8 +800,8 @@ func metaToProto(arr []pilorama.KeyValue) []*KeyValue {
// getContainerInfo returns the list of container nodes, the position in the
// container of the node with the given public key, and the total number of
// nodes across all replicas.
-func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
- cntNodes, _, err := s.getContainerNodes(cid)
+func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
+ cntNodes, _, err := s.getContainerNodes(ctx, cid)
if err != nil {
return nil, 0, 0, err
}
@@ -638,5 +815,21 @@ func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeI
}
func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*HealthcheckResponse, error) {
+ if !s.initialSyncDone.Load() {
+ return nil, ErrAlreadySyncing
+ }
+
return new(HealthcheckResponse), nil
}
+
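+// ReloadAuthorizedKeys atomically replaces the list of public keys allowed to
+// use the Tree service.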
+func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) {
+ s.authorizedKeys.Store(fromPublicKeys(newKeys))
+}
+
+func fromPublicKeys(keys keys.PublicKeys) *[][]byte {
+ buff := make([][]byte, len(keys))
+ for i, k := range keys {
+ buff[i] = k.Bytes()
+ }
+ return &buff
+}
diff --git a/pkg/services/tree/service.pb.go b/pkg/services/tree/service.pb.go
deleted file mode 100644
index 6b5571c3a8..0000000000
--- a/pkg/services/tree/service.pb.go
+++ /dev/null
@@ -1,3454 +0,0 @@
-//*
-// Service for working with CRDT tree.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.21.12
-// source: pkg/services/tree/service.proto
-
-package tree
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type AddRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *AddRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddRequest) Reset() {
- *x = AddRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddRequest) ProtoMessage() {}
-
-func (x *AddRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddRequest.ProtoReflect.Descriptor instead.
-func (*AddRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *AddRequest) GetBody() *AddRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *AddResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddResponse) Reset() {
- *x = AddResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddResponse) ProtoMessage() {}
-
-func (x *AddResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddResponse.ProtoReflect.Descriptor instead.
-func (*AddResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *AddResponse) GetBody() *AddResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddByPathRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *AddByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddByPathRequest) Reset() {
- *x = AddByPathRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathRequest) ProtoMessage() {}
-
-func (x *AddByPathRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathRequest.ProtoReflect.Descriptor instead.
-func (*AddByPathRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddByPathRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddByPathResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *AddByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddByPathResponse) Reset() {
- *x = AddByPathResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathResponse) ProtoMessage() {}
-
-func (x *AddByPathResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathResponse.ProtoReflect.Descriptor instead.
-func (*AddByPathResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddByPathResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *RemoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveRequest) Reset() {
- *x = RemoveRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveRequest) ProtoMessage() {}
-
-func (x *RemoveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveRequest.ProtoReflect.Descriptor instead.
-func (*RemoveRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *RemoveRequest) GetBody() *RemoveRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *RemoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveResponse) Reset() {
- *x = RemoveResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveResponse) ProtoMessage() {}
-
-func (x *RemoveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveResponse.ProtoReflect.Descriptor instead.
-func (*RemoveResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *RemoveResponse) GetBody() *RemoveResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type MoveRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *MoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *MoveRequest) Reset() {
- *x = MoveRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveRequest) ProtoMessage() {}
-
-func (x *MoveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveRequest.ProtoReflect.Descriptor instead.
-func (*MoveRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *MoveRequest) GetBody() *MoveRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *MoveRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type MoveResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *MoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *MoveResponse) Reset() {
- *x = MoveResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveResponse) ProtoMessage() {}
-
-func (x *MoveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveResponse.ProtoReflect.Descriptor instead.
-func (*MoveResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *MoveResponse) GetBody() *MoveResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *MoveResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetNodeByPathRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *GetNodeByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetNodeByPathRequest) Reset() {
- *x = GetNodeByPathRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathRequest) ProtoMessage() {}
-
-func (x *GetNodeByPathRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathRequest.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetNodeByPathResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *GetNodeByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetNodeByPathResponse) Reset() {
- *x = GetNodeByPathResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathResponse) ProtoMessage() {}
-
-func (x *GetNodeByPathResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathResponse.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetNodeByPathResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetSubTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *GetSubTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetSubTreeRequest) Reset() {
- *x = GetSubTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeRequest) ProtoMessage() {}
-
-func (x *GetSubTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeRequest.ProtoReflect.Descriptor instead.
-func (*GetSubTreeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetSubTreeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetSubTreeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *GetSubTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetSubTreeResponse) Reset() {
- *x = GetSubTreeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeResponse) ProtoMessage() {}
-
-func (x *GetSubTreeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeResponse.ProtoReflect.Descriptor instead.
-func (*GetSubTreeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetSubTreeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type TreeListRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *TreeListRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *TreeListRequest) Reset() {
- *x = TreeListRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListRequest) ProtoMessage() {}
-
-func (x *TreeListRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListRequest.ProtoReflect.Descriptor instead.
-func (*TreeListRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *TreeListRequest) GetBody() *TreeListRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *TreeListRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type TreeListResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *TreeListResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *TreeListResponse) Reset() {
- *x = TreeListResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListResponse) ProtoMessage() {}
-
-func (x *TreeListResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListResponse.ProtoReflect.Descriptor instead.
-func (*TreeListResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *TreeListResponse) GetBody() *TreeListResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *TreeListResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type ApplyRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *ApplyRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ApplyRequest) Reset() {
- *x = ApplyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyRequest) ProtoMessage() {}
-
-func (x *ApplyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead.
-func (*ApplyRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *ApplyRequest) GetBody() *ApplyRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ApplyRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type ApplyResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *ApplyResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ApplyResponse) Reset() {
- *x = ApplyResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyResponse) ProtoMessage() {}
-
-func (x *ApplyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyResponse.ProtoReflect.Descriptor instead.
-func (*ApplyResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *ApplyResponse) GetBody() *ApplyResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ApplyResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetOpLogRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *GetOpLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetOpLogRequest) Reset() {
- *x = GetOpLogRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogRequest) ProtoMessage() {}
-
-func (x *GetOpLogRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogRequest.ProtoReflect.Descriptor instead.
-func (*GetOpLogRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetOpLogRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetOpLogResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *GetOpLogResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetOpLogResponse) Reset() {
- *x = GetOpLogResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogResponse) ProtoMessage() {}
-
-func (x *GetOpLogResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogResponse.ProtoReflect.Descriptor instead.
-func (*GetOpLogResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetOpLogResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type HealthcheckResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *HealthcheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthcheckResponse) Reset() {
- *x = HealthcheckResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckResponse) ProtoMessage() {}
-
-func (x *HealthcheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckResponse.ProtoReflect.Descriptor instead.
-func (*HealthcheckResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthcheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type HealthcheckRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *HealthcheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthcheckRequest) Reset() {
- *x = HealthcheckRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckRequest) ProtoMessage() {}
-
-func (x *HealthcheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckRequest.ProtoReflect.Descriptor instead.
-func (*HealthcheckRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthcheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // ID of the parent to attach node to.
- ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
- // Key-Value pairs with meta information.
- Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *AddRequest_Body) Reset() {
- *x = AddRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddRequest_Body) ProtoMessage() {}
-
-func (x *AddRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddRequest_Body.ProtoReflect.Descriptor instead.
-func (*AddRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *AddRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *AddRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *AddRequest_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-func (x *AddRequest_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *AddRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type AddResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the created node.
- NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
-}
-
-func (x *AddResponse_Body) Reset() {
- *x = AddResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddResponse_Body) ProtoMessage() {}
-
-func (x *AddResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddResponse_Body.ProtoReflect.Descriptor instead.
-func (*AddResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *AddResponse_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-type AddByPathRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Attribute to build path with. Default: "FileName".
- PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"`
- // List of path components.
- Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *AddByPathRequest_Body) Reset() {
- *x = AddByPathRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathRequest_Body) ProtoMessage() {}
-
-func (x *AddByPathRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathRequest_Body.ProtoReflect.Descriptor instead.
-func (*AddByPathRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2, 0}
-}
-
-func (x *AddByPathRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *AddByPathRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *AddByPathRequest_Body) GetPathAttribute() string {
- if x != nil {
- return x.PathAttribute
- }
- return ""
-}
-
-func (x *AddByPathRequest_Body) GetPath() []string {
- if x != nil {
- return x.Path
- }
- return nil
-}
-
-func (x *AddByPathRequest_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *AddByPathRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type AddByPathResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // List of all created nodes. The first one is the leaf.
- Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes,proto3" json:"nodes,omitempty"`
- // ID of the parent node where new nodes were attached.
- ParentId uint64 `protobuf:"varint,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
-}
-
-func (x *AddByPathResponse_Body) Reset() {
- *x = AddByPathResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathResponse_Body) ProtoMessage() {}
-
-func (x *AddByPathResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathResponse_Body.ProtoReflect.Descriptor instead.
-func (*AddByPathResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3, 0}
-}
-
-func (x *AddByPathResponse_Body) GetNodes() []uint64 {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-
-func (x *AddByPathResponse_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-type RemoveRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // ID of the node to remove.
- NodeId uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,4,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *RemoveRequest_Body) Reset() {
- *x = RemoveRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveRequest_Body) ProtoMessage() {}
-
-func (x *RemoveRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveRequest_Body.ProtoReflect.Descriptor instead.
-func (*RemoveRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4, 0}
-}
-
-func (x *RemoveRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *RemoveRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *RemoveRequest_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-func (x *RemoveRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type RemoveResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *RemoveResponse_Body) Reset() {
- *x = RemoveResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveResponse_Body) ProtoMessage() {}
-
-func (x *RemoveResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveResponse_Body.ProtoReflect.Descriptor instead.
-func (*RemoveResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5, 0}
-}
-
-type MoveRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // TODO import neo.fs.v2.refs.ContainerID directly.
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // ID of the new parent.
- ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
- // ID of the node to move.
- NodeId uint64 `protobuf:"varint,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *MoveRequest_Body) Reset() {
- *x = MoveRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveRequest_Body) ProtoMessage() {}
-
-func (x *MoveRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveRequest_Body.ProtoReflect.Descriptor instead.
-func (*MoveRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6, 0}
-}
-
-func (x *MoveRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *MoveRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *MoveRequest_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-func (x *MoveRequest_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-func (x *MoveRequest_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *MoveRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type MoveResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *MoveResponse_Body) Reset() {
- *x = MoveResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveResponse_Body) ProtoMessage() {}
-
-func (x *MoveResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveResponse_Body.ProtoReflect.Descriptor instead.
-func (*MoveResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7, 0}
-}
-
-type GetNodeByPathRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Attribute to build path with. Default: "FileName".
- PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"`
- // List of path components.
- Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"`
- // List of attributes to include in response.
- Attributes []string `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // Flag to return only the latest version of node.
- LatestOnly bool `protobuf:"varint,6,opt,name=latest_only,json=latestOnly,proto3" json:"latest_only,omitempty"`
- // Flag to return all stored attributes.
- AllAttributes bool `protobuf:"varint,7,opt,name=all_attributes,json=allAttributes,proto3" json:"all_attributes,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,8,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *GetNodeByPathRequest_Body) Reset() {
- *x = GetNodeByPathRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathRequest_Body) ProtoMessage() {}
-
-func (x *GetNodeByPathRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8, 0}
-}
-
-func (x *GetNodeByPathRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *GetNodeByPathRequest_Body) GetPathAttribute() string {
- if x != nil {
- return x.PathAttribute
- }
- return ""
-}
-
-func (x *GetNodeByPathRequest_Body) GetPath() []string {
- if x != nil {
- return x.Path
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest_Body) GetAttributes() []string {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool {
- if x != nil {
- return x.LatestOnly
- }
- return false
-}
-
-func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool {
- if x != nil {
- return x.AllAttributes
- }
- return false
-}
-
-func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-// Information about a single tree node.
-type GetNodeByPathResponse_Info struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Node ID.
- NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // Timestamp of the last operation with the node.
- Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty"`
- // Parent ID.
- ParentId uint64 `protobuf:"varint,4,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
-}
-
-func (x *GetNodeByPathResponse_Info) Reset() {
- *x = GetNodeByPathResponse_Info{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathResponse_Info) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathResponse_Info) ProtoMessage() {}
-
-func (x *GetNodeByPathResponse_Info) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathResponse_Info.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathResponse_Info) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 0}
-}
-
-func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
- if x != nil {
- return x.Timestamp
- }
- return 0
-}
-
-func (x *GetNodeByPathResponse_Info) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-type GetNodeByPathResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // List of nodes stored by path.
- Nodes []*GetNodeByPathResponse_Info `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
-}
-
-func (x *GetNodeByPathResponse_Body) Reset() {
- *x = GetNodeByPathResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathResponse_Body) ProtoMessage() {}
-
-func (x *GetNodeByPathResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 1}
-}
-
-func (x *GetNodeByPathResponse_Body) GetNodes() []*GetNodeByPathResponse_Info {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-
-type GetSubTreeRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // ID of the root node of a subtree.
- RootId uint64 `protobuf:"varint,3,opt,name=root_id,json=rootId,proto3" json:"root_id,omitempty"`
- // Optional depth of the traversal. Zero means return only root.
- // Maximum depth is 10.
- Depth uint32 `protobuf:"varint,4,opt,name=depth,proto3" json:"depth,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *GetSubTreeRequest_Body) Reset() {
- *x = GetSubTreeRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeRequest_Body) ProtoMessage() {}
-
-func (x *GetSubTreeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetSubTreeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0}
-}
-
-func (x *GetSubTreeRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *GetSubTreeRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *GetSubTreeRequest_Body) GetRootId() uint64 {
- if x != nil {
- return x.RootId
- }
- return 0
-}
-
-func (x *GetSubTreeRequest_Body) GetDepth() uint32 {
- if x != nil {
- return x.Depth
- }
- return 0
-}
-
-func (x *GetSubTreeRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type GetSubTreeResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the node.
- NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // ID of the parent.
- ParentId uint64 `protobuf:"varint,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
- // Time node was first added to a tree.
- Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"`
-}
-
-func (x *GetSubTreeResponse_Body) Reset() {
- *x = GetSubTreeResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeResponse_Body) ProtoMessage() {}
-
-func (x *GetSubTreeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetSubTreeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11, 0}
-}
-
-func (x *GetSubTreeResponse_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-func (x *GetSubTreeResponse_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-func (x *GetSubTreeResponse_Body) GetTimestamp() uint64 {
- if x != nil {
- return x.Timestamp
- }
- return 0
-}
-
-func (x *GetSubTreeResponse_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-type TreeListRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-}
-
-func (x *TreeListRequest_Body) Reset() {
- *x = TreeListRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListRequest_Body) ProtoMessage() {}
-
-func (x *TreeListRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListRequest_Body.ProtoReflect.Descriptor instead.
-func (*TreeListRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12, 0}
-}
-
-func (x *TreeListRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-type TreeListResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Tree IDs.
- Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
-}
-
-func (x *TreeListResponse_Body) Reset() {
- *x = TreeListResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListResponse_Body) ProtoMessage() {}
-
-func (x *TreeListResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListResponse_Body.ProtoReflect.Descriptor instead.
-func (*TreeListResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13, 0}
-}
-
-func (x *TreeListResponse_Body) GetIds() []string {
- if x != nil {
- return x.Ids
- }
- return nil
-}
-
-type ApplyRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Operation to be applied.
- Operation *LogMove `protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"`
-}
-
-func (x *ApplyRequest_Body) Reset() {
- *x = ApplyRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyRequest_Body) ProtoMessage() {}
-
-func (x *ApplyRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyRequest_Body.ProtoReflect.Descriptor instead.
-func (*ApplyRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14, 0}
-}
-
-func (x *ApplyRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *ApplyRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *ApplyRequest_Body) GetOperation() *LogMove {
- if x != nil {
- return x.Operation
- }
- return nil
-}
-
-type ApplyResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ApplyResponse_Body) Reset() {
- *x = ApplyResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyResponse_Body) ProtoMessage() {}
-
-func (x *ApplyResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyResponse_Body.ProtoReflect.Descriptor instead.
-func (*ApplyResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15, 0}
-}
-
-type GetOpLogRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Starting height to return logs from.
- Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
- // Amount of operations to return.
- Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
-}
-
-func (x *GetOpLogRequest_Body) Reset() {
- *x = GetOpLogRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogRequest_Body) ProtoMessage() {}
-
-func (x *GetOpLogRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetOpLogRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16, 0}
-}
-
-func (x *GetOpLogRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *GetOpLogRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *GetOpLogRequest_Body) GetHeight() uint64 {
- if x != nil {
- return x.Height
- }
- return 0
-}
-
-func (x *GetOpLogRequest_Body) GetCount() uint64 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-type GetOpLogResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Operation on a tree.
- Operation *LogMove `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
-}
-
-func (x *GetOpLogResponse_Body) Reset() {
- *x = GetOpLogResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogResponse_Body) ProtoMessage() {}
-
-func (x *GetOpLogResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetOpLogResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17, 0}
-}
-
-func (x *GetOpLogResponse_Body) GetOperation() *LogMove {
- if x != nil {
- return x.Operation
- }
- return nil
-}
-
-type HealthcheckResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthcheckResponse_Body) Reset() {
- *x = HealthcheckResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckResponse_Body) ProtoMessage() {}
-
-func (x *HealthcheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckResponse_Body.ProtoReflect.Descriptor instead.
-func (*HealthcheckResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18, 0}
-}
-
-type HealthcheckRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthcheckRequest_Body) Reset() {
- *x = HealthcheckRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckRequest_Body) ProtoMessage() {}
-
-func (x *HealthcheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckRequest_Body.ProtoReflect.Descriptor instead.
-func (*HealthcheckRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19, 0}
-}
-
-var File_pkg_services_tree_service_proto protoreflect.FileDescriptor
-
-var file_pkg_services_tree_service_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74,
- 0x72, 0x65, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x04, 0x74, 0x72, 0x65, 0x65, 0x1a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x02, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0xa6, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
- 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74,
- 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72,
- 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
- 0x64, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f,
- 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61,
- 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x0b, 0x41, 0x64, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64,
- 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x1f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6e,
- 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f,
- 0x64, 0x65, 0x49, 0x64, 0x22, 0xb9, 0x02, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61,
- 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41,
- 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
- 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x04, 0x42, 0x6f,
- 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
- 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25,
- 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72,
- 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74,
- 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b,
- 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a,
- 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x22, 0xaf, 0x01, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42,
- 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x39, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x05,
- 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x49, 0x64, 0x22, 0xec, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07,
- 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74,
- 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x21,
- 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0x76, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xaa, 0x02, 0x0a, 0x0b, 0x4d, 0x6f,
- 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d,
- 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
- 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x1a, 0xbf, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a,
- 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69,
- 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12,
- 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
- 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d,
- 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f,
- 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65,
- 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x72, 0x0a, 0x0c, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x6f, 0x76, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x85, 0x03, 0x0a, 0x14, 0x47,
- 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65,
- 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x88, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e,
- 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
- 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x74, 0x74,
- 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x73,
- 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x6c, 0x61,
- 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x5f,
- 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12,
- 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b,
- 0x65, 0x6e, 0x22, 0xbc, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79,
- 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65,
- 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04,
- 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
- 0x64, 0x1a, 0x3e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x6e, 0x6f, 0x64,
- 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
- 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65,
- 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74,
- 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x94, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64,
- 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a,
- 0x07, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
- 0x72, 0x6f, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c,
- 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
- 0xf6, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53,
- 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x65,
- 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x29, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x10, 0x54, 0x72, 0x65, 0x65, 0x4c,
- 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xdb, 0x01, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x6f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a,
- 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x22, 0x74, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe2, 0x01, 0x0a, 0x0f, 0x47, 0x65,
- 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x70, 0x0a, 0x04,
- 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74,
- 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64,
- 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xa7,
- 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f,
- 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x33, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2b, 0x0a, 0x09, 0x6f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x09, 0x6f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x80, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x32, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63,
- 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x7e, 0x0a, 0x12, 0x48,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x31, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0xd6, 0x04, 0x0a, 0x0b,
- 0x54, 0x72, 0x65, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x41,
- 0x64, 0x64, 0x12, 0x10, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x42, 0x79,
- 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42,
- 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12,
- 0x13, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x6f,
- 0x76, 0x65, 0x12, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x6f, 0x76,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x47, 0x65, 0x74,
- 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1a, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65,
- 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65,
- 0x65, 0x12, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54,
- 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x08, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69,
- 0x73, 0x74, 0x12, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69,
- 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x30, 0x0a, 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x12, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x12,
- 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65,
- 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01,
- 0x12, 0x42, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x12,
- 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
- 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
- 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f,
- 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
- 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_tree_service_proto_rawDescOnce sync.Once
- file_pkg_services_tree_service_proto_rawDescData = file_pkg_services_tree_service_proto_rawDesc
-)
-
-func file_pkg_services_tree_service_proto_rawDescGZIP() []byte {
- file_pkg_services_tree_service_proto_rawDescOnce.Do(func() {
- file_pkg_services_tree_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_service_proto_rawDescData)
- })
- return file_pkg_services_tree_service_proto_rawDescData
-}
-
-var file_pkg_services_tree_service_proto_msgTypes = make([]protoimpl.MessageInfo, 41)
-var file_pkg_services_tree_service_proto_goTypes = []interface{}{
- (*AddRequest)(nil), // 0: tree.AddRequest
- (*AddResponse)(nil), // 1: tree.AddResponse
- (*AddByPathRequest)(nil), // 2: tree.AddByPathRequest
- (*AddByPathResponse)(nil), // 3: tree.AddByPathResponse
- (*RemoveRequest)(nil), // 4: tree.RemoveRequest
- (*RemoveResponse)(nil), // 5: tree.RemoveResponse
- (*MoveRequest)(nil), // 6: tree.MoveRequest
- (*MoveResponse)(nil), // 7: tree.MoveResponse
- (*GetNodeByPathRequest)(nil), // 8: tree.GetNodeByPathRequest
- (*GetNodeByPathResponse)(nil), // 9: tree.GetNodeByPathResponse
- (*GetSubTreeRequest)(nil), // 10: tree.GetSubTreeRequest
- (*GetSubTreeResponse)(nil), // 11: tree.GetSubTreeResponse
- (*TreeListRequest)(nil), // 12: tree.TreeListRequest
- (*TreeListResponse)(nil), // 13: tree.TreeListResponse
- (*ApplyRequest)(nil), // 14: tree.ApplyRequest
- (*ApplyResponse)(nil), // 15: tree.ApplyResponse
- (*GetOpLogRequest)(nil), // 16: tree.GetOpLogRequest
- (*GetOpLogResponse)(nil), // 17: tree.GetOpLogResponse
- (*HealthcheckResponse)(nil), // 18: tree.HealthcheckResponse
- (*HealthcheckRequest)(nil), // 19: tree.HealthcheckRequest
- (*AddRequest_Body)(nil), // 20: tree.AddRequest.Body
- (*AddResponse_Body)(nil), // 21: tree.AddResponse.Body
- (*AddByPathRequest_Body)(nil), // 22: tree.AddByPathRequest.Body
- (*AddByPathResponse_Body)(nil), // 23: tree.AddByPathResponse.Body
- (*RemoveRequest_Body)(nil), // 24: tree.RemoveRequest.Body
- (*RemoveResponse_Body)(nil), // 25: tree.RemoveResponse.Body
- (*MoveRequest_Body)(nil), // 26: tree.MoveRequest.Body
- (*MoveResponse_Body)(nil), // 27: tree.MoveResponse.Body
- (*GetNodeByPathRequest_Body)(nil), // 28: tree.GetNodeByPathRequest.Body
- (*GetNodeByPathResponse_Info)(nil), // 29: tree.GetNodeByPathResponse.Info
- (*GetNodeByPathResponse_Body)(nil), // 30: tree.GetNodeByPathResponse.Body
- (*GetSubTreeRequest_Body)(nil), // 31: tree.GetSubTreeRequest.Body
- (*GetSubTreeResponse_Body)(nil), // 32: tree.GetSubTreeResponse.Body
- (*TreeListRequest_Body)(nil), // 33: tree.TreeListRequest.Body
- (*TreeListResponse_Body)(nil), // 34: tree.TreeListResponse.Body
- (*ApplyRequest_Body)(nil), // 35: tree.ApplyRequest.Body
- (*ApplyResponse_Body)(nil), // 36: tree.ApplyResponse.Body
- (*GetOpLogRequest_Body)(nil), // 37: tree.GetOpLogRequest.Body
- (*GetOpLogResponse_Body)(nil), // 38: tree.GetOpLogResponse.Body
- (*HealthcheckResponse_Body)(nil), // 39: tree.HealthcheckResponse.Body
- (*HealthcheckRequest_Body)(nil), // 40: tree.HealthcheckRequest.Body
- (*Signature)(nil), // 41: tree.Signature
- (*KeyValue)(nil), // 42: tree.KeyValue
- (*LogMove)(nil), // 43: tree.LogMove
-}
-var file_pkg_services_tree_service_proto_depIdxs = []int32{
- 20, // 0: tree.AddRequest.body:type_name -> tree.AddRequest.Body
- 41, // 1: tree.AddRequest.signature:type_name -> tree.Signature
- 21, // 2: tree.AddResponse.body:type_name -> tree.AddResponse.Body
- 41, // 3: tree.AddResponse.signature:type_name -> tree.Signature
- 22, // 4: tree.AddByPathRequest.body:type_name -> tree.AddByPathRequest.Body
- 41, // 5: tree.AddByPathRequest.signature:type_name -> tree.Signature
- 23, // 6: tree.AddByPathResponse.body:type_name -> tree.AddByPathResponse.Body
- 41, // 7: tree.AddByPathResponse.signature:type_name -> tree.Signature
- 24, // 8: tree.RemoveRequest.body:type_name -> tree.RemoveRequest.Body
- 41, // 9: tree.RemoveRequest.signature:type_name -> tree.Signature
- 25, // 10: tree.RemoveResponse.body:type_name -> tree.RemoveResponse.Body
- 41, // 11: tree.RemoveResponse.signature:type_name -> tree.Signature
- 26, // 12: tree.MoveRequest.body:type_name -> tree.MoveRequest.Body
- 41, // 13: tree.MoveRequest.signature:type_name -> tree.Signature
- 27, // 14: tree.MoveResponse.body:type_name -> tree.MoveResponse.Body
- 41, // 15: tree.MoveResponse.signature:type_name -> tree.Signature
- 28, // 16: tree.GetNodeByPathRequest.body:type_name -> tree.GetNodeByPathRequest.Body
- 41, // 17: tree.GetNodeByPathRequest.signature:type_name -> tree.Signature
- 30, // 18: tree.GetNodeByPathResponse.body:type_name -> tree.GetNodeByPathResponse.Body
- 41, // 19: tree.GetNodeByPathResponse.signature:type_name -> tree.Signature
- 31, // 20: tree.GetSubTreeRequest.body:type_name -> tree.GetSubTreeRequest.Body
- 41, // 21: tree.GetSubTreeRequest.signature:type_name -> tree.Signature
- 32, // 22: tree.GetSubTreeResponse.body:type_name -> tree.GetSubTreeResponse.Body
- 41, // 23: tree.GetSubTreeResponse.signature:type_name -> tree.Signature
- 33, // 24: tree.TreeListRequest.body:type_name -> tree.TreeListRequest.Body
- 41, // 25: tree.TreeListRequest.signature:type_name -> tree.Signature
- 34, // 26: tree.TreeListResponse.body:type_name -> tree.TreeListResponse.Body
- 41, // 27: tree.TreeListResponse.signature:type_name -> tree.Signature
- 35, // 28: tree.ApplyRequest.body:type_name -> tree.ApplyRequest.Body
- 41, // 29: tree.ApplyRequest.signature:type_name -> tree.Signature
- 36, // 30: tree.ApplyResponse.body:type_name -> tree.ApplyResponse.Body
- 41, // 31: tree.ApplyResponse.signature:type_name -> tree.Signature
- 37, // 32: tree.GetOpLogRequest.body:type_name -> tree.GetOpLogRequest.Body
- 41, // 33: tree.GetOpLogRequest.signature:type_name -> tree.Signature
- 38, // 34: tree.GetOpLogResponse.body:type_name -> tree.GetOpLogResponse.Body
- 41, // 35: tree.GetOpLogResponse.signature:type_name -> tree.Signature
- 39, // 36: tree.HealthcheckResponse.body:type_name -> tree.HealthcheckResponse.Body
- 41, // 37: tree.HealthcheckResponse.signature:type_name -> tree.Signature
- 40, // 38: tree.HealthcheckRequest.body:type_name -> tree.HealthcheckRequest.Body
- 41, // 39: tree.HealthcheckRequest.signature:type_name -> tree.Signature
- 42, // 40: tree.AddRequest.Body.meta:type_name -> tree.KeyValue
- 42, // 41: tree.AddByPathRequest.Body.meta:type_name -> tree.KeyValue
- 42, // 42: tree.MoveRequest.Body.meta:type_name -> tree.KeyValue
- 42, // 43: tree.GetNodeByPathResponse.Info.meta:type_name -> tree.KeyValue
- 29, // 44: tree.GetNodeByPathResponse.Body.nodes:type_name -> tree.GetNodeByPathResponse.Info
- 42, // 45: tree.GetSubTreeResponse.Body.meta:type_name -> tree.KeyValue
- 43, // 46: tree.ApplyRequest.Body.operation:type_name -> tree.LogMove
- 43, // 47: tree.GetOpLogResponse.Body.operation:type_name -> tree.LogMove
- 0, // 48: tree.TreeService.Add:input_type -> tree.AddRequest
- 2, // 49: tree.TreeService.AddByPath:input_type -> tree.AddByPathRequest
- 4, // 50: tree.TreeService.Remove:input_type -> tree.RemoveRequest
- 6, // 51: tree.TreeService.Move:input_type -> tree.MoveRequest
- 8, // 52: tree.TreeService.GetNodeByPath:input_type -> tree.GetNodeByPathRequest
- 10, // 53: tree.TreeService.GetSubTree:input_type -> tree.GetSubTreeRequest
- 12, // 54: tree.TreeService.TreeList:input_type -> tree.TreeListRequest
- 14, // 55: tree.TreeService.Apply:input_type -> tree.ApplyRequest
- 16, // 56: tree.TreeService.GetOpLog:input_type -> tree.GetOpLogRequest
- 19, // 57: tree.TreeService.Healthcheck:input_type -> tree.HealthcheckRequest
- 1, // 58: tree.TreeService.Add:output_type -> tree.AddResponse
- 3, // 59: tree.TreeService.AddByPath:output_type -> tree.AddByPathResponse
- 5, // 60: tree.TreeService.Remove:output_type -> tree.RemoveResponse
- 7, // 61: tree.TreeService.Move:output_type -> tree.MoveResponse
- 9, // 62: tree.TreeService.GetNodeByPath:output_type -> tree.GetNodeByPathResponse
- 11, // 63: tree.TreeService.GetSubTree:output_type -> tree.GetSubTreeResponse
- 13, // 64: tree.TreeService.TreeList:output_type -> tree.TreeListResponse
- 15, // 65: tree.TreeService.Apply:output_type -> tree.ApplyResponse
- 17, // 66: tree.TreeService.GetOpLog:output_type -> tree.GetOpLogResponse
- 18, // 67: tree.TreeService.Healthcheck:output_type -> tree.HealthcheckResponse
- 58, // [58:68] is the sub-list for method output_type
- 48, // [48:58] is the sub-list for method input_type
- 48, // [48:48] is the sub-list for extension type_name
- 48, // [48:48] is the sub-list for extension extendee
- 0, // [0:48] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_tree_service_proto_init() }
-func file_pkg_services_tree_service_proto_init() {
- if File_pkg_services_tree_service_proto != nil {
- return
- }
- file_pkg_services_tree_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_tree_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathResponse_Info); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_tree_service_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 41,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_pkg_services_tree_service_proto_goTypes,
- DependencyIndexes: file_pkg_services_tree_service_proto_depIdxs,
- MessageInfos: file_pkg_services_tree_service_proto_msgTypes,
- }.Build()
- File_pkg_services_tree_service_proto = out.File
- file_pkg_services_tree_service_proto_rawDesc = nil
- file_pkg_services_tree_service_proto_goTypes = nil
- file_pkg_services_tree_service_proto_depIdxs = nil
-}
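
The file removed above is protoc output for the tree service and is never edited by hand; its deletion goes together with the service.proto changes that follow, from which fresh bindings can be produced. A minimal regeneration sketch, assuming protoc with the protoc-gen-go plugin on PATH (the repository's real entry point may be a Make target instead):

//go:generate protoc --go_out=paths=source_relative:. pkg/services/tree/service.proto
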
diff --git a/pkg/services/tree/service.proto b/pkg/services/tree/service.proto
index 182d8adb2e..88bf0bca45 100644
--- a/pkg/services/tree/service.proto
+++ b/pkg/services/tree/service.proto
@@ -28,25 +28,25 @@ service TreeService {
// Otherwise, a request is denied.
// Add adds new node to the tree. Invoked by a client.
- rpc Add (AddRequest) returns (AddResponse);
+ rpc Add(AddRequest) returns (AddResponse);
// AddByPath adds a new node to the tree by path. Invoked by a client.
- rpc AddByPath (AddByPathRequest) returns (AddByPathResponse);
+ rpc AddByPath(AddByPathRequest) returns (AddByPathResponse);
// Remove removes a node from the tree. Invoked by a client.
- rpc Remove (RemoveRequest) returns (RemoveResponse);
+ rpc Remove(RemoveRequest) returns (RemoveResponse);
// Move moves a node from one parent to another. Invoked by a client.
- rpc Move (MoveRequest) returns (MoveResponse);
+ rpc Move(MoveRequest) returns (MoveResponse);
// GetNodeByPath returns a list of IDs corresponding to a specific filepath.
- rpc GetNodeByPath (GetNodeByPathRequest) returns (GetNodeByPathResponse);
+ rpc GetNodeByPath(GetNodeByPathRequest) returns (GetNodeByPathResponse);
// GetSubTree returns the tree corresponding to a specific node.
- rpc GetSubTree (GetSubTreeRequest) returns (stream GetSubTreeResponse);
+ rpc GetSubTree(GetSubTreeRequest) returns (stream GetSubTreeResponse);
// TreeList returns the list of existing trees in the container.
- rpc TreeList (TreeListRequest) returns (TreeListResponse);
+ rpc TreeList(TreeListRequest) returns (TreeListResponse);
/* Synchronization API */
// Apply pushes a log operation from another node to the current one.
// The request must be signed by a container node.
- rpc Apply (ApplyRequest) returns (ApplyResponse);
+ rpc Apply(ApplyRequest) returns (ApplyResponse);
// GetOpLog returns a stream of logged operations starting from some height.
rpc GetOpLog(GetOpLogRequest) returns (stream GetOpLogResponse);
// Healthcheck is a dummy RPC to check service availability.
@@ -85,7 +85,6 @@ message AddResponse {
Signature signature = 2;
};
-
message AddByPathRequest {
message Body {
// Container ID in V2 format.
@@ -122,7 +121,6 @@ message AddByPathResponse {
Signature signature = 2;
};
-
message RemoveRequest {
message Body {
// Container ID in V2 format.
@@ -142,8 +140,7 @@ message RemoveRequest {
}
message RemoveResponse {
- message Body {
- }
+ message Body {}
// Response body.
Body body = 1;
@@ -151,7 +148,6 @@ message RemoveResponse {
Signature signature = 2;
};
-
message MoveRequest {
message Body {
// TODO import neo.fs.v2.refs.ContainerID directly.
@@ -176,8 +172,7 @@ message MoveRequest {
}
message MoveResponse {
- message Body {
- }
+ message Body {}
// Response body.
Body body = 1;
@@ -185,7 +180,6 @@ message MoveResponse {
Signature signature = 2;
};
-
message GetNodeByPathRequest {
message Body {
// Container ID in V2 format.
@@ -235,20 +229,28 @@ message GetNodeByPathResponse {
Signature signature = 2;
};
-
message GetSubTreeRequest {
message Body {
+ message Order {
+ enum Direction {
+ None = 0;
+ Asc = 1;
+ }
+ Direction direction = 1;
+ }
// Container ID in V2 format.
bytes container_id = 1;
// The name of the tree.
string tree_id = 2;
- // ID of the root node of a subtree.
- uint64 root_id = 3;
+ // IDs of the root nodes of a subtree forest.
+ repeated uint64 root_id = 3 [ packed = false ];
// Optional depth of the traversal. Zero means return only the root.
// Maximum depth is 10.
uint32 depth = 4;
// Bearer token in V2 format.
bytes bearer_token = 5;
+ // Result ordering.
+ Order order_by = 6;
}
// Request body.
@@ -260,11 +262,11 @@ message GetSubTreeRequest {
message GetSubTreeResponse {
message Body {
// ID of the node.
- uint64 node_id = 1;
+ repeated uint64 node_id = 1 [ packed = false ];
// ID of the parent.
- uint64 parent_id = 2;
+ repeated uint64 parent_id = 2 [ packed = false ];
// Time node was first added to a tree.
- uint64 timestamp = 3;
+ repeated uint64 timestamp = 3 [ packed = false ];
// Node meta-information.
repeated KeyValue meta = 4;
}
@@ -298,7 +300,6 @@ message TreeListResponse {
Signature signature = 2;
}
-
message ApplyRequest {
message Body {
// Container ID in V2 format.
@@ -316,8 +317,7 @@ message ApplyRequest {
}
message ApplyResponse {
- message Body {
- }
+ message Body {}
// Response body.
Body body = 1;
@@ -325,7 +325,6 @@ message ApplyResponse {
Signature signature = 2;
};
-
message GetOpLogRequest {
message Body {
// Container ID in V2 format.
@@ -357,8 +356,7 @@ message GetOpLogResponse {
};
message HealthcheckResponse {
- message Body {
- }
+ message Body {}
// Response body.
Body body = 1;
@@ -367,8 +365,7 @@ message HealthcheckResponse {
};
message HealthcheckRequest {
- message Body {
- }
+ message Body {}
// Request body.
Body body = 1;
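Two functional changes stand out in this .proto revision besides the formatting cleanup: `GetSubTree` now accepts a forest of root IDs plus an optional `Order`, and `GetSubTreeResponse.Body` batches several nodes per message by turning `node_id`, `parent_id` and `timestamp` into repeated fields. Marking them `[packed = false]` keeps the change wire-compatible: an unpacked repeated varint with one element encodes byte-for-byte the same as the old singular field, whereas proto3 would pack repeated scalars by default. A small self-contained check of that claim, using `protowire`, which the regenerated Go code also imports:

```go
package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Old schema: singular `uint64 root_id = 3`.
	oldEnc := protowire.AppendTag(nil, 3, protowire.VarintType)
	oldEnc = protowire.AppendVarint(oldEnc, 42)

	// New schema: `repeated uint64 root_id = 3 [packed = false]`
	// with one element: same tag, same wire type, same bytes.
	newEnc := protowire.AppendTag(nil, 3, protowire.VarintType)
	newEnc = protowire.AppendVarint(newEnc, 42)

	fmt.Println(bytes.Equal(oldEnc, newEnc)) // true
}
```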
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index 42b7ba3fc7..88d0026217 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -2,78 +2,373 @@
package tree
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ protowire "google.golang.org/protobuf/encoding/protowire"
+ strconv "strconv"
+)
+
+type AddRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddRequest_Body)(nil)
+ _ json.Marshaler = (*AddRequest_Body)(nil)
+ _ json.Unmarshaler = (*AddRequest_Body)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.ParentId)
for i := range x.Meta {
- size += proto.NestedStructureSize(4, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
}
size += proto.BytesSize(5, x.BearerToken)
return size
}
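Note the switch from `proto.NestedStructureSize(4, x.Meta[i])` to `proto.NestedStructureSizeUnchecked(4, &x.Meta[i])`: `Meta` is now a `[]KeyValue` value slice rather than a slice of pointers (the old code passed `x.Meta[i]` directly), so elements live in one contiguous backing array and can never be nil, which lets the size calculation skip the nil check. The unmarshalers later in this file grow the slice with the matching append-by-value pattern:

```go
// Append-by-value pattern used by the generated unmarshalers (sketch):
x.Meta = append(x.Meta, KeyValue{}) // grow the value slice in place
ff := &x.Meta[len(x.Meta)-1]        // address of the slot just appended
if err := ff.UnmarshalProtobuf(data); err != nil {
	return fmt.Errorf("unmarshal: %w", err)
}
```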
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i])
- }
- offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
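Every `MarshalProtobuf` in the regenerated file follows the same borrow-emit-append shape: take an `easyproto` marshaler from `pool.MarshalerPool`, write fields through `EmitProtobuf`, and append the resulting wire bytes to the caller-supplied `dst`. Callers can therefore pass `nil` to allocate fresh or reuse one buffer across calls, for example:

```go
// Hypothetical caller-side buffer reuse; bodies and send are
// illustrative names, not part of this package.
var buf []byte
for _, body := range bodies { // bodies: []*AddRequest_Body (assumed)
	buf = buf[:0]                   // keep the backing array
	buf = body.MarshalProtobuf(buf) // append wire bytes
	send(buf)                       // hypothetical consumer
}
```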
+func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(3, x.ParentId)
+ }
+ for i := range x.Meta {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(5, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ case 4: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 5: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *AddRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *AddRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *AddRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *AddRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *AddRequest_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *AddRequest_Body) SetParentId(v uint64) {
+ x.ParentId = v
+}
+func (x *AddRequest_Body) GetMeta() []KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *AddRequest_Body) SetMeta(v []KeyValue) {
+ x.Meta = v
+}
+func (x *AddRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *AddRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
+ out.RawString(prefix)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "parentId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.ParentId = f
+ }
+ case "meta":
+ {
+ var f KeyValue
+ var list []KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = KeyValue{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
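The generated JSON codecs follow protojson's conventions rather than encoding/json defaults: 64-bit integers are written as quoted decimal strings (so JavaScript clients do not lose precision past 2^53) and byte slices as base64 strings. Under those rules a populated body should render roughly like this (a sketch inside package tree; the exact bytes assume jwriter's documented buffer behavior):

```go
// Sketch, assuming package tree and an fmt import:
body := AddRequest_Body{
	ContainerId: []byte{0x01, 0x02}, // -> base64 "AQI="
	TreeId:      "tree",
	ParentId:    5, // -> quoted string "5"
}
b, _ := body.MarshalJSON()
fmt.Println(string(b))
// {"containerId":"AQI=","treeId":"tree","parentId":"5","meta":[],"bearerToken":""}
```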
+type AddRequest struct {
+ Body *AddRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddRequest)(nil)
+ _ json.Marshaler = (*AddRequest)(nil)
+ _ json.Unmarshaler = (*AddRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -89,71 +384,333 @@ func (x *AddRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
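`ReadSignedData` can chain `x.GetBody().MarshalProtobuf(buf)` without guarding against a missing body because the regenerated code is nil-receiver-safe end to end: getters check the receiver, `EmitProtobuf` returns immediately on a nil message, and marshaling a nil body therefore yields empty signed data instead of a panic. A minimal sketch:

```go
// Sketch: every step below is a no-op on nil receivers.
var req *AddRequest                 // nil request
body := req.GetBody()               // nil (*AddRequest_Body)
signed := body.MarshalProtobuf(nil) // empty: EmitProtobuf no-ops on nil
fmt.Println(len(signed))            // 0
```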
-func (x *AddRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddRequest) GetBody() *AddRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddRequest) SetBody(v *AddRequest_Body) {
+ x.Body = v
+}
+func (x *AddRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddRequest_Body
+ f = new(AddRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddResponse_Body struct {
+ NodeId uint64 `json:"nodeId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddResponse_Body)(nil)
+ _ json.Marshaler = (*AddResponse_Body)(nil)
+ _ json.Unmarshaler = (*AddResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.UInt64Size(1, x.NodeId)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(1, x.NodeId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ }
+ }
+ return nil
+}
+func (x *AddResponse_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *AddResponse_Body) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodeId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.NodeId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddResponse struct {
+ Body *AddResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddResponse)(nil)
+ _ json.Marshaler = (*AddResponse)(nil)
+ _ json.Unmarshaler = (*AddResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -169,85 +726,558 @@ func (x *AddResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddResponse) GetBody() *AddResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddResponse) SetBody(v *AddResponse_Body) {
+ x.Body = v
+}
+func (x *AddResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddResponse_Body
+ f = new(AddResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddByPathRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ PathAttribute string `json:"pathAttribute"`
+ Path []string `json:"path"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddByPathRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddByPathRequest_Body)(nil)
+ _ json.Marshaler = (*AddByPathRequest_Body)(nil)
+ _ json.Unmarshaler = (*AddByPathRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddByPathRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.StringSize(3, x.PathAttribute)
size += proto.RepeatedStringSize(4, x.Path)
for i := range x.Meta {
- size += proto.NestedStructureSize(5, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute)
- offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i])
- }
- offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if len(x.PathAttribute) != 0 {
+ mm.AppendString(3, x.PathAttribute)
+ }
+ for j := range x.Path {
+ mm.AppendString(4, x.Path[j])
+ }
+ for i := range x.Meta {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(6, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddByPathRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // PathAttribute
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PathAttribute")
+ }
+ x.PathAttribute = data
+ case 4: // Path
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Path")
+ }
+ x.Path = append(x.Path, data)
+ case 5: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 6: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *AddByPathRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *AddByPathRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *AddByPathRequest_Body) GetPathAttribute() string {
+ if x != nil {
+ return x.PathAttribute
+ }
+ return ""
+}
+func (x *AddByPathRequest_Body) SetPathAttribute(v string) {
+ x.PathAttribute = v
+}
+func (x *AddByPathRequest_Body) GetPath() []string {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetPath(v []string) {
+ x.Path = v
+}
+func (x *AddByPathRequest_Body) GetMeta() []KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetMeta(v []KeyValue) {
+ x.Meta = v
+}
+func (x *AddByPathRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"pathAttribute\":"
+ out.RawString(prefix)
+ out.String(x.PathAttribute)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"path\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Path {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Path[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
+ out.RawString(prefix)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "pathAttribute":
+ {
+ var f string
+ f = in.String()
+ x.PathAttribute = f
+ }
+ case "path":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Path = list
+ in.Delim(']')
+ }
+ case "meta":
+ {
+ var f KeyValue
+ var list []KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = KeyValue{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddByPathRequest struct {
+ Body *AddByPathRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddByPathRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddByPathRequest)(nil)
+ _ json.Marshaler = (*AddByPathRequest)(nil)
+ _ json.Unmarshaler = (*AddByPathRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddByPathRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -263,17 +1293,183 @@ func (x *AddByPathRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddByPathRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddByPathRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddByPathRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddByPathRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddByPathRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddByPathRequest) SetBody(v *AddByPathRequest_Body) {
+ x.Body = v
+}
+func (x *AddByPathRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddByPathRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddByPathRequest_Body
+ f = new(AddByPathRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddByPathResponse_Body struct {
+ Nodes []uint64 `json:"nodes"`
+ ParentId uint64 `json:"parentId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddByPathResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddByPathResponse_Body)(nil)
+ _ json.Marshaler = (*AddByPathResponse_Body)(nil)
+ _ json.Unmarshaler = (*AddByPathResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddByPathResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
var n int
n, _ = proto.RepeatedUInt64Size(1, x.Nodes)
size += n
@@ -281,57 +1477,211 @@ func (x *AddByPathResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedUInt64Marshal(1, buf[offset:], x.Nodes)
- offset += proto.UInt64Marshal(2, buf[offset:], x.ParentId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Nodes) != 0 {
+ mm.AppendUint64s(1, x.Nodes)
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(2, x.ParentId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddByPathResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Nodes
+ data, ok := fc.UnpackUint64s(nil)
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Nodes")
+ }
+ x.Nodes = data
+ case 2: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ }
+ }
+ return nil
+}
+func (x *AddByPathResponse_Body) GetNodes() []uint64 {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+func (x *AddByPathResponse_Body) SetNodes(v []uint64) {
+ x.Nodes = v
+}
+func (x *AddByPathResponse_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *AddByPathResponse_Body) SetParentId(v uint64) {
+ x.ParentId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Nodes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Nodes[i], 10)
+ out.RawByte('"')
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodes":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Nodes = list
+ in.Delim(']')
+ }
+ case "parentId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.ParentId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddByPathResponse struct {
+ Body *AddByPathResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddByPathResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddByPathResponse)(nil)
+ _ json.Marshaler = (*AddByPathResponse)(nil)
+ _ json.Unmarshaler = (*AddByPathResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *AddByPathResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -347,17 +1697,185 @@ func (x *AddByPathResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddByPathResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddByPathResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddByPathResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddByPathResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddByPathResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddByPathResponse) SetBody(v *AddByPathResponse_Body) {
+ x.Body = v
+}
+func (x *AddByPathResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddByPathResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddByPathResponse_Body
+ f = new(AddByPathResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ NodeId uint64 `json:"nodeId"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *RemoveRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.NodeId)
@@ -365,59 +1883,275 @@ func (x *RemoveRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.NodeId)
- offset += proto.BytesMarshal(4, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(3, x.NodeId)
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(4, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ case 4: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *RemoveRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *RemoveRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *RemoveRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *RemoveRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *RemoveRequest_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *RemoveRequest_Body) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+func (x *RemoveRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *RemoveRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
+ out.RawString(prefix)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
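+// UnmarshalEasyJSON decodes x from the easyjson lexer. Only keys from the
+// schema are handled; there is no default branch that skips the values of
+// unknown keys.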
+func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "nodeId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ f = v
+ x.NodeId = f
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveRequest struct {
+ Body *RemoveRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveRequest)(nil)
+ _ json.Marshaler = (*RemoveRequest)(nil)
+ _ json.Unmarshaler = (*RemoveRequest)(nil)
+)
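+
+// Illustrative round trip (hypothetical values):
+//
+//	req := &RemoveRequest{Body: &RemoveRequest_Body{TreeId: "tree"}}
+//	data := req.MarshalProtobuf(nil)
+//	var got RemoveRequest
+//	err := got.UnmarshalProtobuf(data)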
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *RemoveRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -433,62 +2167,283 @@ func (x *RemoveRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
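+
+// Note: the signed payload is exactly the protobuf encoding of the body, so a
+// verifier can recompute it with GetBody().MarshalProtobuf(nil).
+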
-func (x *RemoveRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveRequest) GetBody() *RemoveRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveRequest) SetBody(v *RemoveRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveRequest_Body
+ f = new(RemoveRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
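+// RemoveResponse_Body is intentionally empty: EmitProtobuf writes nothing,
+// MarshalProtobuf appends zero bytes, and the JSON form is just "{}".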
+type RemoveResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *RemoveResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveResponse struct {
+ Body *RemoveResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveResponse)(nil)
+ _ json.Marshaler = (*RemoveResponse)(nil)
+ _ json.Unmarshaler = (*RemoveResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *RemoveResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -504,85 +2459,564 @@ func (x *RemoveResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveResponse) GetBody() *RemoveResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveResponse) SetBody(v *RemoveResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveResponse_Body
+ f = new(RemoveResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ NodeId uint64 `json:"nodeId"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveRequest_Body)(nil)
+ _ json.Marshaler = (*MoveRequest_Body)(nil)
+ _ json.Unmarshaler = (*MoveRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *MoveRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.ParentId)
size += proto.UInt64Size(4, x.NodeId)
for i := range x.Meta {
- size += proto.NestedStructureSize(5, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId)
- offset += proto.UInt64Marshal(4, buf[offset:], x.NodeId)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i])
- }
- offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(3, x.ParentId)
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(4, x.NodeId)
+ }
+ for i := range x.Meta {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(6, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
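+// Repeated Meta entries are appended in wire order and decoded in place
+// through a pointer to the new slice element, avoiding a per-entry copy.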
+func (x *MoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ case 4: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ case 5: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 6: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *MoveRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *MoveRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *MoveRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *MoveRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *MoveRequest_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *MoveRequest_Body) SetParentId(v uint64) {
+ x.ParentId = v
+}
+func (x *MoveRequest_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *MoveRequest_Body) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+func (x *MoveRequest_Body) GetMeta() []KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *MoveRequest_Body) SetMeta(v []KeyValue) {
+ x.Meta = v
+}
+func (x *MoveRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *MoveRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
+ out.RawString(prefix)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
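+// UnmarshalEasyJSON decodes x from the easyjson lexer. The uint64 fields go
+// through in.JsonNumber, which accepts both bare and quoted decimal numbers,
+// so the quoted output of MarshalEasyJSON round-trips.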
+func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "parentId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ f = v
+ x.ParentId = f
+ }
+ case "nodeId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ f = v
+ x.NodeId = f
+ }
+ case "meta":
+ {
+ var f KeyValue
+ var list []KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = KeyValue{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveRequest struct {
+ Body *MoveRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveRequest)(nil)
+ _ json.Marshaler = (*MoveRequest)(nil)
+ _ json.Unmarshaler = (*MoveRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *MoveRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -598,62 +3032,283 @@ func (x *MoveRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *MoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *MoveRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *MoveRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(MoveRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *MoveRequest) GetBody() *MoveRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *MoveRequest) SetBody(v *MoveRequest_Body) {
+ x.Body = v
+}
+func (x *MoveRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *MoveRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *MoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *MoveRequest_Body
+ f = new(MoveRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveResponse_Body)(nil)
+ _ json.Marshaler = (*MoveResponse_Body)(nil)
+ _ json.Unmarshaler = (*MoveResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *MoveResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *MoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *MoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveResponse struct {
+ Body *MoveResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveResponse)(nil)
+ _ json.Marshaler = (*MoveResponse)(nil)
+ _ json.Unmarshaler = (*MoveResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *MoveResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -669,17 +3324,189 @@ func (x *MoveResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *MoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *MoveResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *MoveResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(MoveResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *MoveResponse) GetBody() *MoveResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *MoveResponse) SetBody(v *MoveResponse_Body) {
+ x.Body = v
+}
+func (x *MoveResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *MoveResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *MoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *MoveResponse_Body
+ f = new(MoveResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ PathAttribute string `json:"pathAttribute"`
+ Path []string `json:"path"`
+ Attributes []string `json:"attributes"`
+ LatestOnly bool `json:"latestOnly"`
+ AllAttributes bool `json:"allAttributes"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest_Body)(nil)
+ _ json.Marshaler = (*GetNodeByPathRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetNodeByPathRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.StringSize(3, x.PathAttribute)
@@ -691,63 +3518,429 @@ func (x *GetNodeByPathRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute)
- offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path)
- offset += proto.RepeatedStringMarshal(5, buf[offset:], x.Attributes)
- offset += proto.BoolMarshal(6, buf[offset:], x.LatestOnly)
- offset += proto.BoolMarshal(7, buf[offset:], x.AllAttributes)
- offset += proto.BytesMarshal(8, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
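+// EmitProtobuf appends the fields of x to mm. Repeated string fields (Path,
+// Attributes) produce one tagged entry per element, and bool fields are
+// written only when true, per proto3 presence rules.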
+func (x *GetNodeByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if len(x.PathAttribute) != 0 {
+ mm.AppendString(3, x.PathAttribute)
+ }
+ for j := range x.Path {
+ mm.AppendString(4, x.Path[j])
+ }
+ for j := range x.Attributes {
+ mm.AppendString(5, x.Attributes[j])
+ }
+ if x.LatestOnly {
+ mm.AppendBool(6, x.LatestOnly)
+ }
+ if x.AllAttributes {
+ mm.AppendBool(7, x.AllAttributes)
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(8, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // PathAttribute
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PathAttribute")
+ }
+ x.PathAttribute = data
+ case 4: // Path
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Path")
+ }
+ x.Path = append(x.Path, data)
+ case 5: // Attributes
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Attributes")
+ }
+ x.Attributes = append(x.Attributes, data)
+ case 6: // LatestOnly
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "LatestOnly")
+ }
+ x.LatestOnly = data
+ case 7: // AllAttributes
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "AllAttributes")
+ }
+ x.AllAttributes = data
+ case 8: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *GetNodeByPathRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *GetNodeByPathRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *GetNodeByPathRequest_Body) GetPathAttribute() string {
+ if x != nil {
+ return x.PathAttribute
+ }
+ return ""
+}
+func (x *GetNodeByPathRequest_Body) SetPathAttribute(v string) {
+ x.PathAttribute = v
+}
+func (x *GetNodeByPathRequest_Body) GetPath() []string {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetPath(v []string) {
+ x.Path = v
+}
+func (x *GetNodeByPathRequest_Body) GetAttributes() []string {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetAttributes(v []string) {
+ x.Attributes = v
+}
+func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool {
+ if x != nil {
+ return x.LatestOnly
+ }
+ return false
+}
+func (x *GetNodeByPathRequest_Body) SetLatestOnly(v bool) {
+ x.LatestOnly = v
+}
+func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool {
+ if x != nil {
+ return x.AllAttributes
+ }
+ return false
+}
+func (x *GetNodeByPathRequest_Body) SetAllAttributes(v bool) {
+ x.AllAttributes = v
+}
+func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"pathAttribute\":"
+ out.RawString(prefix)
+ out.String(x.PathAttribute)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"path\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Path {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Path[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"attributes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Attributes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Attributes[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"latestOnly\":"
+ out.RawString(prefix)
+ out.Bool(x.LatestOnly)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"allAttributes\":"
+ out.RawString(prefix)
+ out.Bool(x.AllAttributes)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
+ out.RawString(prefix)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "pathAttribute":
+ {
+ var f string
+ f = in.String()
+ x.PathAttribute = f
+ }
+ case "path":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Path = list
+ in.Delim(']')
+ }
+ case "attributes":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Attributes = list
+ in.Delim(']')
+ }
+ case "latestOnly":
+ {
+ var f bool
+ f = in.Bool()
+ x.LatestOnly = f
+ }
+ case "allAttributes":
+ {
+ var f bool
+ f = in.Bool()
+ x.AllAttributes = f
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathRequest struct {
+ Body *GetNodeByPathRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest)(nil)
+ _ json.Marshaler = (*GetNodeByPathRequest)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetNodeByPathRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -763,113 +3956,638 @@ func (x *GetNodeByPathRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNodeByPathRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetNodeByPathRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNodeByPathRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest) SetBody(v *GetNodeByPathRequest_Body) {
+ x.Body = v
+}
+func (x *GetNodeByPathRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNodeByPathRequest_Body
+ f = new(GetNodeByPathRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathResponse_Info struct {
+ NodeId uint64 `json:"nodeId"`
+ Timestamp uint64 `json:"timestamp"`
+ Meta []KeyValue `json:"meta"`
+ ParentId uint64 `json:"parentId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Info)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Info)(nil)
+ _ json.Marshaler = (*GetNodeByPathResponse_Info)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathResponse_Info)(nil)
+)
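+
+// Illustrative construction (hypothetical values; meta prepared elsewhere):
+//
+//	info := new(GetNodeByPathResponse_Info)
+//	info.SetNodeId(7)
+//	info.SetTimestamp(1700000000)
+//	info.SetMeta(meta)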
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetNodeByPathResponse_Info) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.UInt64Size(1, x.NodeId)
size += proto.UInt64Size(2, x.Timestamp)
for i := range x.Meta {
- size += proto.NestedStructureSize(3, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Meta[i])
}
size += proto.UInt64Size(4, x.ParentId)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathResponse_Info) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
- offset += proto.UInt64Marshal(2, buf[offset:], x.Timestamp)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Meta[i])
- }
- offset += proto.UInt64Marshal(4, buf[offset:], x.ParentId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathResponse_Info) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(1, x.NodeId)
+ }
+ if x.Timestamp != 0 {
+ mm.AppendUint64(2, x.Timestamp)
+ }
+ for i := range x.Meta {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(4, x.ParentId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathResponse_Info) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Info")
+ }
+ switch fc.FieldNum {
+ case 1: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ case 2: // Timestamp
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Timestamp")
+ }
+ x.Timestamp = data
+ case 3: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 4: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *GetNodeByPathResponse_Info) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+func (x *GetNodeByPathResponse_Info) SetTimestamp(v uint64) {
+ x.Timestamp = v
+}
+func (x *GetNodeByPathResponse_Info) GetMeta() []KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Info) SetMeta(v []KeyValue) {
+ x.Meta = v
+}
+func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *GetNodeByPathResponse_Info) SetParentId(v uint64) {
+ x.ParentId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathResponse_Info) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"timestamp\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathResponse_Info) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodeId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.NodeId = f
+ }
+ case "timestamp":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.Timestamp = f
+ }
+ case "meta":
+ {
+ var f KeyValue
+ var list []KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = KeyValue{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "parentId":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.ParentId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
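+// Round-trip sketch (illustrative, not generated code): MarshalProtobuf never
+// fails and appends to dst, so passing nil is fine; the pooled easyproto
+// marshaler keeps this allocation-cheap under load.
+//
+//	src := GetNodeByPathResponse_Info{NodeId: 7, Timestamp: 42}
+//	data := src.MarshalProtobuf(nil)
+//	var dst GetNodeByPathResponse_Info
+//	if err := dst.UnmarshalProtobuf(data); err != nil {
+//		panic(err) // only on malformed input
+//	}
+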
+type GetNodeByPathResponse_Body struct {
+ Nodes []GetNodeByPathResponse_Info `json:"nodes"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Body)(nil)
+ _ json.Marshaler = (*GetNodeByPathResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetNodeByPathResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
for i := range x.Nodes {
- size += proto.NestedStructureSize(1, x.Nodes[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Nodes[i])
}
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- for i := range x.Nodes {
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Nodes[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Nodes {
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Nodes
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Nodes")
+ }
+ x.Nodes = append(x.Nodes, GetNodeByPathResponse_Info{})
+ ff := &x.Nodes[len(x.Nodes)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Body) GetNodes() []GetNodeByPathResponse_Info {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Body) SetNodes(v []GetNodeByPathResponse_Info) {
+ x.Nodes = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Nodes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Nodes[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodes":
+ {
+ var f GetNodeByPathResponse_Info
+ var list []GetNodeByPathResponse_Info
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = GetNodeByPathResponse_Info{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Nodes = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathResponse struct {
+ Body *GetNodeByPathResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse)(nil)
+ _ json.Marshaler = (*GetNodeByPathResponse)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetNodeByPathResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+// ReadSignedData appends the signed data of x to buf and returns the
+// resulting slice; a new buffer is allocated when buf lacks capacity.
//
@@ -885,79 +4603,749 @@ func (x *GetNodeByPathResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNodeByPathResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
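+
+// Signing sketch (illustrative; sign is a hypothetical caller-side helper,
+// not part of this package): the signable payload is just the protobuf
+// encoding of the body, and the returned error is always nil here.
+//
+//	buf, _ := resp.ReadSignedData(nil)
+//	resp.SetSignature(sign(buf))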
-func (x *GetNodeByPathResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNodeByPathResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse) SetBody(v *GetNodeByPathResponse_Body) {
+ x.Body = v
+}
+func (x *GetNodeByPathResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNodeByPathResponse_Body
+ f = new(GetNodeByPathResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeRequest_Body_Order_Direction int32
+
+const (
+ GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0
+ GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1
+)
+
+var (
+ GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{
+ 0: "None",
+ 1: "Asc",
+ }
+ GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{
+ "None": 0,
+ "Asc": 1,
+ }
+)
+
+func (x GetSubTreeRequest_Body_Order_Direction) String() string {
+ if v, ok := GetSubTreeRequest_Body_Order_Direction_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *GetSubTreeRequest_Body_Order_Direction) FromString(s string) bool {
+ if v, ok := GetSubTreeRequest_Body_Order_Direction_value[s]; ok {
+ *x = GetSubTreeRequest_Body_Order_Direction(v)
+ return true
+ }
+ return false
+}
+
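+// Usage sketch (illustrative): Direction converts between its symbolic name
+// and numeric value; unknown numbers fall back to their decimal form.
+//
+//	var d GetSubTreeRequest_Body_Order_Direction
+//	_ = d.FromString("Asc") // true; d == GetSubTreeRequest_Body_Order_Asc
+//	_ = d.String()          // "Asc"
+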
+type GetSubTreeRequest_Body_Order struct {
+ Direction GetSubTreeRequest_Body_Order_Direction `json:"direction"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body_Order)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body_Order)(nil)
+ _ json.Marshaler = (*GetSubTreeRequest_Body_Order)(nil)
+ _ json.Unmarshaler = (*GetSubTreeRequest_Body_Order)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *GetSubTreeRequest_Body_Order) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.EnumSize(1, int32(x.Direction))
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeRequest_Body_Order) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetSubTreeRequest_Body_Order) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Direction) != 0 {
+ mm.AppendInt32(1, int32(x.Direction))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeRequest_Body_Order) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body_Order")
+ }
+ switch fc.FieldNum {
+ case 1: // Direction
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Direction")
+ }
+ x.Direction = GetSubTreeRequest_Body_Order_Direction(data)
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction {
+ if x != nil {
+ return x.Direction
+ }
+ return 0
+}
+func (x *GetSubTreeRequest_Body_Order) SetDirection(v GetSubTreeRequest_Body_Order_Direction) {
+ x.Direction = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeRequest_Body_Order) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"direction\":"
+ out.RawString(prefix)
+ v := int32(x.Direction)
+ if vv, ok := GetSubTreeRequest_Body_Order_Direction_name[v]; ok {
+ out.String(vv)
+ } else {
+ out.Int32(v)
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeRequest_Body_Order) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeRequest_Body_Order) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "direction":
+ {
+ var f GetSubTreeRequest_Body_Order_Direction
+ var parsedValue GetSubTreeRequest_Body_Order_Direction
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := GetSubTreeRequest_Body_Order_Direction_value[v]; ok {
+ parsedValue = GetSubTreeRequest_Body_Order_Direction(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = GetSubTreeRequest_Body_Order_Direction(vv)
+ case float64:
+ parsedValue = GetSubTreeRequest_Body_Order_Direction(v)
+ }
+ f = parsedValue
+ x.Direction = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ RootId []uint64 `json:"rootId"`
+ Depth uint32 `json:"depth"`
+ BearerToken []byte `json:"bearerToken"`
+ OrderBy *GetSubTreeRequest_Body_Order `json:"orderBy"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body)(nil)
+ _ json.Marshaler = (*GetSubTreeRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetSubTreeRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetSubTreeRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
- size += proto.UInt64Size(3, x.RootId)
+ for i := range x.RootId {
+ size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.RootId[i]))
+ }
size += proto.UInt32Size(4, x.Depth)
size += proto.BytesSize(5, x.BearerToken)
+ size += proto.NestedStructureSize(6, x.OrderBy)
return size
}
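+
+// Size sketch (illustrative): repeated uint64 fields are counted unpacked,
+// one tag byte plus one varint per element, so RootId = []uint64{1, 300}
+// contributes (1+1) + (1+2) = 5 bytes to field 3.
+//
+//	n := 0
+//	for _, v := range []uint64{1, 300} {
+//		n += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(v))
+//	}
+//	// n == 5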
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.RootId)
- offset += proto.UInt32Marshal(4, buf[offset:], x.Depth)
- offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetSubTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ for j := range x.RootId {
+ mm.AppendUint64(3, x.RootId[j])
+ }
+ if x.Depth != 0 {
+ mm.AppendUint32(4, x.Depth)
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(5, x.BearerToken)
+ }
+ if x.OrderBy != nil {
+ x.OrderBy.EmitProtobuf(mm.AppendMessage(6))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // RootId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RootId")
+ }
+ x.RootId = append(x.RootId, data)
+ case 4: // Depth
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Depth")
+ }
+ x.Depth = data
+ case 5: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ case 6: // OrderBy
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "OrderBy")
+ }
+ x.OrderBy = new(GetSubTreeRequest_Body_Order)
+ if err := x.OrderBy.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *GetSubTreeRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *GetSubTreeRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *GetSubTreeRequest_Body) GetRootId() []uint64 {
+ if x != nil {
+ return x.RootId
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetRootId(v []uint64) {
+ x.RootId = v
+}
+func (x *GetSubTreeRequest_Body) GetDepth() uint32 {
+ if x != nil {
+ return x.Depth
+ }
+ return 0
+}
+func (x *GetSubTreeRequest_Body) SetDepth(v uint32) {
+ x.Depth = v
+}
+func (x *GetSubTreeRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order {
+ if x != nil {
+ return x.OrderBy
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetOrderBy(v *GetSubTreeRequest_Body_Order) {
+ x.OrderBy = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"rootId\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.RootId {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.RootId[i], 10)
+ out.RawByte('"')
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"depth\":"
+ out.RawString(prefix)
+ out.Uint32(x.Depth)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"bearerToken\":"
+ out.RawString(prefix)
+ if x.BearerToken != nil {
+ out.Base64Bytes(x.BearerToken)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"orderBy\":"
+ out.RawString(prefix)
+ x.OrderBy.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "rootId":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.RootId = list
+ in.Delim(']')
+ }
+ case "depth":
+ {
+ var f uint32
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint32(v)
+ f = pv
+ x.Depth = f
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.BearerToken = f
+ }
+ case "orderBy":
+ {
+ var f *GetSubTreeRequest_Body_Order
+ f = new(GetSubTreeRequest_Body_Order)
+ f.UnmarshalEasyJSON(in)
+ x.OrderBy = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
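+// Construction sketch (illustrative; the field values are made up):
+//
+//	var b GetSubTreeRequest_Body
+//	b.SetTreeId("system")
+//	b.SetRootId([]uint64{0})
+//	b.SetDepth(2)
+//	b.SetOrderBy(&GetSubTreeRequest_Body_Order{Direction: GetSubTreeRequest_Body_Order_Asc})
+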
+type GetSubTreeRequest struct {
+ Body *GetSubTreeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest)(nil)
+ _ json.Marshaler = (*GetSubTreeRequest)(nil)
+ _ json.Unmarshaler = (*GetSubTreeRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetSubTreeRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+// ReadSignedData appends the signed data of x to buf and returns the
+// resulting slice; a new buffer is allocated when buf lacks capacity.
//
@@ -973,81 +5361,530 @@ func (x *GetSubTreeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetSubTreeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetSubTreeRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetSubTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetSubTreeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetSubTreeRequest) SetBody(v *GetSubTreeRequest_Body) {
+ x.Body = v
+}
+func (x *GetSubTreeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetSubTreeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetSubTreeRequest_Body
+ f = new(GetSubTreeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeResponse_Body struct {
+ NodeId []uint64 `json:"nodeId"`
+ ParentId []uint64 `json:"parentId"`
+ Timestamp []uint64 `json:"timestamp"`
+ Meta []KeyValue `json:"meta"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse_Body)(nil)
+ _ json.Marshaler = (*GetSubTreeResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetSubTreeResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetSubTreeResponse_Body) StableSize() (size int) {
- size += proto.UInt64Size(1, x.NodeId)
- size += proto.UInt64Size(2, x.ParentId)
- size += proto.UInt64Size(3, x.Timestamp)
+ if x == nil {
+ return 0
+ }
+ for i := range x.NodeId {
+ size += protowire.SizeGroup(protowire.Number(1), protowire.SizeVarint(x.NodeId[i]))
+ }
+ for i := range x.ParentId {
+ size += protowire.SizeGroup(protowire.Number(2), protowire.SizeVarint(x.ParentId[i]))
+ }
+ for i := range x.Timestamp {
+ size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.Timestamp[i]))
+ }
for i := range x.Meta {
- size += proto.NestedStructureSize(4, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
}
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
- offset += proto.UInt64Marshal(2, buf[offset:], x.ParentId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.Timestamp)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.NodeId {
+ mm.AppendUint64(1, x.NodeId[j])
+ }
+ for j := range x.ParentId {
+ mm.AppendUint64(2, x.ParentId[j])
+ }
+ for j := range x.Timestamp {
+ mm.AppendUint64(3, x.Timestamp[j])
+ }
+ for i := range x.Meta {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = append(x.NodeId, data)
+ case 2: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = append(x.ParentId, data)
+ case 3: // Timestamp
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Timestamp")
+ }
+ x.Timestamp = append(x.Timestamp, data)
+ case 4: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) GetNodeId() []uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetNodeId(v []uint64) {
+ x.NodeId = v
+}
+func (x *GetSubTreeResponse_Body) GetParentId() []uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetParentId(v []uint64) {
+ x.ParentId = v
+}
+func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetTimestamp(v []uint64) {
+ x.Timestamp = v
+}
+func (x *GetSubTreeResponse_Body) GetMeta() []KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetMeta(v []KeyValue) {
+ x.Meta = v
+}
+
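+// Encoding note (illustrative): the repeated uint64 fields are emitted
+// unpacked, one tag+varint per element, matching the StableSize accounting
+// above.
+//
+//	var b GetSubTreeResponse_Body
+//	b.SetNodeId([]uint64{1, 2})
+//	_ = b.MarshalProtobuf(nil) // wire bytes: 0x08 0x01 0x08 0x02
+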
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"nodeId\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.NodeId {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId[i], 10)
+ out.RawByte('"')
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentId\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.ParentId {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId[i], 10)
+ out.RawByte('"')
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"timestamp\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Timestamp {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp[i], 10)
+ out.RawByte('"')
+ }
+ out.RawByte(']')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodeId":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.NodeId = list
+ in.Delim(']')
+ }
+ case "parentId":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.ParentId = list
+ in.Delim(']')
+ }
+ case "timestamp":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Timestamp = list
+ in.Delim(']')
+ }
+ case "meta":
+ {
+ var f KeyValue
+ var list []KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = KeyValue{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
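+// JSON note (illustrative; exact bytes depend on the easyjson version): 64-bit
+// integers are written as quoted decimal strings, following the canonical
+// protobuf JSON mapping, while the lexer accepts quoted and bare numbers.
+//
+//	data, _ := json.Marshal(&GetSubTreeResponse_Body{NodeId: []uint64{1}})
+//	// data ~ {"nodeId":["1"],"parentId":[],"timestamp":[],"meta":[]}
+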
+type GetSubTreeResponse struct {
+ Body *GetSubTreeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse)(nil)
+ _ json.Marshaler = (*GetSubTreeResponse)(nil)
+ _ json.Unmarshaler = (*GetSubTreeResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetSubTreeResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+// ReadSignedData appends the signed data of x to buf and returns the
+// resulting slice; a new buffer is allocated when buf lacks capacity.
//
@@ -1063,71 +5900,333 @@ func (x *GetSubTreeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetSubTreeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetSubTreeResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetSubTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetSubTreeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetSubTreeResponse) SetBody(v *GetSubTreeResponse_Body) {
+ x.Body = v
+}
+func (x *GetSubTreeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetSubTreeResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetSubTreeResponse_Body
+ f = new(GetSubTreeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TreeListRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListRequest_Body)(nil)
+ _ json.Marshaler = (*TreeListRequest_Body)(nil)
+ _ json.Unmarshaler = (*TreeListRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *TreeListRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ }
+ }
+ return nil
+}
+func (x *TreeListRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *TreeListRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
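+// JSON note (illustrative): byte fields such as ContainerId are encoded as
+// padded base64 strings.
+//
+//	var b TreeListRequest_Body
+//	b.SetContainerId([]byte{0xde, 0xad})
+//	data, _ := json.Marshal(&b) // {"containerId":"3q0="}
+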
+type TreeListRequest struct {
+ Body *TreeListRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListRequest)(nil)
+ _ json.Marshaler = (*TreeListRequest)(nil)
+ _ json.Unmarshaler = (*TreeListRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *TreeListRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+// ReadSignedData appends the signed data of x to buf and returns the
+// resulting slice; a new buffer is allocated when buf lacks capacity.
//
@@ -1143,71 +6242,337 @@ func (x *TreeListRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TreeListRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *TreeListRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TreeListRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TreeListRequest) GetBody() *TreeListRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TreeListRequest) SetBody(v *TreeListRequest_Body) {
+ x.Body = v
+}
+func (x *TreeListRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TreeListRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TreeListRequest_Body
+ f = new(TreeListRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TreeListResponse_Body struct {
+ Ids []string `json:"ids"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListResponse_Body)(nil)
+ _ json.Marshaler = (*TreeListResponse_Body)(nil)
+ _ json.Unmarshaler = (*TreeListResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *TreeListResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.RepeatedStringSize(1, x.Ids)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedStringMarshal(1, buf[offset:], x.Ids)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Ids {
+ mm.AppendString(1, x.Ids[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Ids
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Ids")
+ }
+ x.Ids = append(x.Ids, data)
+ }
+ }
+ return nil
+}
+func (x *TreeListResponse_Body) GetIds() []string {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+func (x *TreeListResponse_Body) SetIds(v []string) {
+ x.Ids = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"ids\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Ids {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Ids[i])
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
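+	// IsStart is true only when this value is the whole document; in that
+	// case Consumed() below verifies that no trailing tokens remain.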
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "ids":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Ids = list
+ in.Delim(']')
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TreeListResponse struct {
+ Body *TreeListResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListResponse)(nil)
+ _ json.Marshaler = (*TreeListResponse)(nil)
+ _ json.Unmarshaler = (*TreeListResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *TreeListResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1223,75 +6588,409 @@ func (x *TreeListResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TreeListResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
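+	// The signature covers only the marshaled body, so re-encoding the body
+	// reproduces exactly the bytes that were signed.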
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *TreeListResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TreeListResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TreeListResponse) GetBody() *TreeListResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TreeListResponse) SetBody(v *TreeListResponse_Body) {
+ x.Body = v
+}
+func (x *TreeListResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TreeListResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TreeListResponse_Body
+ f = new(TreeListResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ApplyRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ Operation *LogMove `json:"operation"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyRequest_Body)(nil)
+ _ json.Marshaler = (*ApplyRequest_Body)(nil)
+ _ json.Unmarshaler = (*ApplyRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ApplyRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.NestedStructureSize(3, x.Operation)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Operation)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
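+	// Only non-zero fields are emitted, mirroring proto3 semantics: absent
+	// fields decode to their default values on the receiving side.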
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.Operation != nil {
+ x.Operation.EmitProtobuf(mm.AppendMessage(3))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // Operation
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Operation")
+ }
+ x.Operation = new(LogMove)
+ if err := x.Operation.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ApplyRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *ApplyRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *ApplyRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *ApplyRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *ApplyRequest_Body) GetOperation() *LogMove {
+ if x != nil {
+ return x.Operation
+ }
+ return nil
+}
+func (x *ApplyRequest_Body) SetOperation(v *LogMove) {
+ x.Operation = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
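+		// Bytes fields follow the protobuf JSON mapping: base64 text, with
+		// nil rendered as the empty string.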
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"operation\":"
+ out.RawString(prefix)
+ x.Operation.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "operation":
+ {
+ var f *LogMove
+ f = new(LogMove)
+ f.UnmarshalEasyJSON(in)
+ x.Operation = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ApplyRequest struct {
+ Body *ApplyRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyRequest)(nil)
+ _ json.Marshaler = (*ApplyRequest)(nil)
+ _ json.Unmarshaler = (*ApplyRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ApplyRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1307,62 +7006,283 @@ func (x *ApplyRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ApplyRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ApplyRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ApplyRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ApplyRequest) GetBody() *ApplyRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ApplyRequest) SetBody(v *ApplyRequest_Body) {
+ x.Body = v
+}
+func (x *ApplyRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ApplyRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ApplyRequest_Body
+ f = new(ApplyRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
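+// ApplyResponse_Body is intentionally empty: a successful Apply carries no
+// payload beyond the signed envelope around it.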
+type ApplyResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyResponse_Body)(nil)
+ _ json.Marshaler = (*ApplyResponse_Body)(nil)
+ _ json.Unmarshaler = (*ApplyResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ApplyResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyResponse_Body")
+ }
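+		// The body defines no fields; NextField has already consumed the
+		// data it read, so unknown fields are simply ignored.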
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ApplyResponse struct {
+ Body *ApplyResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyResponse)(nil)
+ _ json.Marshaler = (*ApplyResponse)(nil)
+ _ json.Unmarshaler = (*ApplyResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *ApplyResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1378,17 +7298,185 @@ func (x *ApplyResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ApplyResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ApplyResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ApplyResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ApplyResponse) GetBody() *ApplyResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ApplyResponse) SetBody(v *ApplyResponse_Body) {
+ x.Body = v
+}
+func (x *ApplyResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ApplyResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ApplyResponse_Body
+ f = new(ApplyResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
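+// GetOpLogRequest_Body identifies a tree and, presumably, asks for up to
+// Count operations of its log starting from Height.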
+type GetOpLogRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ Height uint64 `json:"height"`
+ Count uint64 `json:"count"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogRequest_Body)(nil)
+ _ json.Marshaler = (*GetOpLogRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetOpLogRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetOpLogRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.Height)
@@ -1396,59 +7484,275 @@ func (x *GetOpLogRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.Height)
- offset += proto.UInt64Marshal(4, buf[offset:], x.Count)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.Height != 0 {
+ mm.AppendUint64(3, x.Height)
+ }
+ if x.Count != 0 {
+ mm.AppendUint64(4, x.Count)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // Height
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Height")
+ }
+ x.Height = data
+ case 4: // Count
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Count")
+ }
+ x.Count = data
+ }
+ }
+ return nil
+}
+func (x *GetOpLogRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *GetOpLogRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *GetOpLogRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *GetOpLogRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *GetOpLogRequest_Body) GetHeight() uint64 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+func (x *GetOpLogRequest_Body) SetHeight(v uint64) {
+ x.Height = v
+}
+func (x *GetOpLogRequest_Body) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+func (x *GetOpLogRequest_Body) SetCount(v uint64) {
+ x.Count = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"containerId\":"
+ out.RawString(prefix)
+ if x.ContainerId != nil {
+ out.Base64Bytes(x.ContainerId)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"height\":"
+ out.RawString(prefix)
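+		// uint64 values are written as quoted decimal strings, per the
+		// protobuf JSON mapping, so precision survives JavaScript parsers.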
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"count\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Count, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "height":
+ {
+ var f uint64
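+				// JsonNumber accepts the value quoted or bare, so both the
+				// canonical string form and plain numbers parse here.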
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.Height = f
+ }
+ case "count":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.Count = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetOpLogRequest struct {
+ Body *GetOpLogRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogRequest)(nil)
+ _ json.Marshaler = (*GetOpLogRequest)(nil)
+ _ json.Unmarshaler = (*GetOpLogRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetOpLogRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1464,71 +7768,327 @@ func (x *GetOpLogRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetOpLogRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetOpLogRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetOpLogRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetOpLogRequest) SetBody(v *GetOpLogRequest_Body) {
+ x.Body = v
+}
+func (x *GetOpLogRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetOpLogRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetOpLogRequest_Body
+ f = new(GetOpLogRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetOpLogResponse_Body struct {
+ Operation *LogMove `json:"operation"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogResponse_Body)(nil)
+ _ json.Marshaler = (*GetOpLogResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetOpLogResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetOpLogResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Operation)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Operation)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Operation != nil {
+ x.Operation.EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Operation
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Operation")
+ }
+ x.Operation = new(LogMove)
+ if err := x.Operation.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetOpLogResponse_Body) GetOperation() *LogMove {
+ if x != nil {
+ return x.Operation
+ }
+ return nil
+}
+func (x *GetOpLogResponse_Body) SetOperation(v *LogMove) {
+ x.Operation = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"operation\":"
+ out.RawString(prefix)
+ x.Operation.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "operation":
+ {
+ var f *LogMove
+ f = new(LogMove)
+ f.UnmarshalEasyJSON(in)
+ x.Operation = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetOpLogResponse struct {
+ Body *GetOpLogResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogResponse)(nil)
+ _ json.Marshaler = (*GetOpLogResponse)(nil)
+ _ json.Unmarshaler = (*GetOpLogResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *GetOpLogResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1544,62 +8104,283 @@ func (x *GetOpLogResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetOpLogResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetOpLogResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetOpLogResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetOpLogResponse) SetBody(v *GetOpLogResponse_Body) {
+ x.Body = v
+}
+func (x *GetOpLogResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetOpLogResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetOpLogResponse_Body
+ f = new(GetOpLogResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthcheckResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckResponse_Body)(nil)
+ _ json.Marshaler = (*HealthcheckResponse_Body)(nil)
+ _ json.Unmarshaler = (*HealthcheckResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthcheckResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthcheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthcheckResponse struct {
+ Body *HealthcheckResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckResponse)(nil)
+ _ json.Marshaler = (*HealthcheckResponse)(nil)
+ _ json.Unmarshaler = (*HealthcheckResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthcheckResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1615,62 +8396,283 @@ func (x *HealthcheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthcheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthcheckResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthcheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthcheckResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthcheckResponse) SetBody(v *HealthcheckResponse_Body) {
+ x.Body = v
+}
+func (x *HealthcheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthcheckResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthcheckResponse_Body
+ f = new(HealthcheckResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
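+// HealthcheckRequest_Body is empty: the request is nothing more than a
+// signed liveness probe.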
+type HealthcheckRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckRequest_Body)(nil)
+ _ json.Marshaler = (*HealthcheckRequest_Body)(nil)
+ _ json.Unmarshaler = (*HealthcheckRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthcheckRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthcheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+		default:
+			in.SkipRecursive()
+		}
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthcheckRequest struct {
+ Body *HealthcheckRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckRequest)(nil)
+ _ json.Marshaler = (*HealthcheckRequest)(nil)
+ _ json.Unmarshaler = (*HealthcheckRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *HealthcheckRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.NestedStructureSize(1, x.Body)
size += proto.NestedStructureSize(2, x.Signature)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1686,9 +8688,160 @@ func (x *HealthcheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthcheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthcheckRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *HealthcheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthcheckRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthcheckRequest) SetBody(v *HealthcheckRequest_Body) {
+ x.Body = v
+}
+func (x *HealthcheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthcheckRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"body\":"
+ out.RawString(prefix)
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthcheckRequest_Body
+ f = new(HealthcheckRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/tree/service_grpc.pb.go b/pkg/services/tree/service_grpc.pb.go
index 722fd17058..63f96e11a5 100644
--- a/pkg/services/tree/service_grpc.pb.go
+++ b/pkg/services/tree/service_grpc.pb.go
@@ -1,7 +1,10 @@
+//*
+// Service for working with CRDT tree.
+
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.21.12
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.25.0
// source: pkg/services/tree/service.proto
package tree
@@ -18,6 +21,19 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
+const (
+ TreeService_Add_FullMethodName = "/tree.TreeService/Add"
+ TreeService_AddByPath_FullMethodName = "/tree.TreeService/AddByPath"
+ TreeService_Remove_FullMethodName = "/tree.TreeService/Remove"
+ TreeService_Move_FullMethodName = "/tree.TreeService/Move"
+ TreeService_GetNodeByPath_FullMethodName = "/tree.TreeService/GetNodeByPath"
+ TreeService_GetSubTree_FullMethodName = "/tree.TreeService/GetSubTree"
+ TreeService_TreeList_FullMethodName = "/tree.TreeService/TreeList"
+ TreeService_Apply_FullMethodName = "/tree.TreeService/Apply"
+ TreeService_GetOpLog_FullMethodName = "/tree.TreeService/GetOpLog"
+ TreeService_Healthcheck_FullMethodName = "/tree.TreeService/Healthcheck"
+)
+
// TreeServiceClient is the client API for TreeService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@@ -55,7 +71,7 @@ func NewTreeServiceClient(cc grpc.ClientConnInterface) TreeServiceClient {
func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error) {
out := new(AddResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/Add", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_Add_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -64,7 +80,7 @@ func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grp
func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest, opts ...grpc.CallOption) (*AddByPathResponse, error) {
out := new(AddByPathResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/AddByPath", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_AddByPath_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -73,7 +89,7 @@ func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest,
func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error) {
out := new(RemoveResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/Remove", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_Remove_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -82,7 +98,7 @@ func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts
func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...grpc.CallOption) (*MoveResponse, error) {
out := new(MoveResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/Move", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_Move_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -91,7 +107,7 @@ func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...g
func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPathRequest, opts ...grpc.CallOption) (*GetNodeByPathResponse, error) {
out := new(GetNodeByPathResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/GetNodeByPath", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_GetNodeByPath_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -99,7 +115,7 @@ func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPath
}
func (c *treeServiceClient) GetSubTree(ctx context.Context, in *GetSubTreeRequest, opts ...grpc.CallOption) (TreeService_GetSubTreeClient, error) {
- stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], "/tree.TreeService/GetSubTree", opts...)
+ stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], TreeService_GetSubTree_FullMethodName, opts...)
if err != nil {
return nil, err
}
@@ -132,7 +148,7 @@ func (x *treeServiceGetSubTreeClient) Recv() (*GetSubTreeResponse, error) {
func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, opts ...grpc.CallOption) (*TreeListResponse, error) {
out := new(TreeListResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/TreeList", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_TreeList_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -141,7 +157,7 @@ func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, o
func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
out := new(ApplyResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/Apply", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_Apply_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -149,7 +165,7 @@ func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ..
}
func (c *treeServiceClient) GetOpLog(ctx context.Context, in *GetOpLogRequest, opts ...grpc.CallOption) (TreeService_GetOpLogClient, error) {
- stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], "/tree.TreeService/GetOpLog", opts...)
+ stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], TreeService_GetOpLog_FullMethodName, opts...)
if err != nil {
return nil, err
}
@@ -182,7 +198,7 @@ func (x *treeServiceGetOpLogClient) Recv() (*GetOpLogResponse, error) {
func (c *treeServiceClient) Healthcheck(ctx context.Context, in *HealthcheckRequest, opts ...grpc.CallOption) (*HealthcheckResponse, error) {
out := new(HealthcheckResponse)
- err := c.cc.Invoke(ctx, "/tree.TreeService/Healthcheck", in, out, opts...)
+ err := c.cc.Invoke(ctx, TreeService_Healthcheck_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -272,7 +288,7 @@ func _TreeService_Add_Handler(srv interface{}, ctx context.Context, dec func(int
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/Add",
+ FullMethod: TreeService_Add_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).Add(ctx, req.(*AddRequest))
@@ -290,7 +306,7 @@ func _TreeService_AddByPath_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/AddByPath",
+ FullMethod: TreeService_AddByPath_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).AddByPath(ctx, req.(*AddByPathRequest))
@@ -308,7 +324,7 @@ func _TreeService_Remove_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/Remove",
+ FullMethod: TreeService_Remove_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).Remove(ctx, req.(*RemoveRequest))
@@ -326,7 +342,7 @@ func _TreeService_Move_Handler(srv interface{}, ctx context.Context, dec func(in
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/Move",
+ FullMethod: TreeService_Move_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).Move(ctx, req.(*MoveRequest))
@@ -344,7 +360,7 @@ func _TreeService_GetNodeByPath_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/GetNodeByPath",
+ FullMethod: TreeService_GetNodeByPath_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).GetNodeByPath(ctx, req.(*GetNodeByPathRequest))
@@ -383,7 +399,7 @@ func _TreeService_TreeList_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/TreeList",
+ FullMethod: TreeService_TreeList_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).TreeList(ctx, req.(*TreeListRequest))
@@ -401,7 +417,7 @@ func _TreeService_Apply_Handler(srv interface{}, ctx context.Context, dec func(i
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/Apply",
+ FullMethod: TreeService_Apply_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).Apply(ctx, req.(*ApplyRequest))
@@ -440,7 +456,7 @@ func _TreeService_Healthcheck_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/tree.TreeService/Healthcheck",
+ FullMethod: TreeService_Healthcheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TreeServiceServer).Healthcheck(ctx, req.(*HealthcheckRequest))
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 7a5a95c4ce..8221a4546f 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -2,23 +2,23 @@ package tree
import (
"bytes"
+ "context"
"crypto/ecdsa"
"crypto/elliptic"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "go.uber.org/zap"
)
type message interface {
@@ -28,94 +28,97 @@ type message interface {
SetSignature(*Signature)
}
-func basicACLErr(op acl.Op) error {
- return fmt.Errorf("access to operation %s is denied by basic ACL check", op)
-}
-
-func eACLErr(op eacl.Operation, err error) error {
- return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
-}
-
-var errBearerWrongOwner = errors.New("bearer token must be signed by the container owner")
-var errBearerWrongContainer = errors.New("bearer token is created for another container")
-var errBearerSignature = errors.New("invalid bearer token signature")
+var (
+ errBearerWrongContainer = errors.New("bearer token is created for another container")
+ errBearerSignature = errors.New("invalid bearer token signature")
+)
// verifyClient verifies if the request for a client operation
// was signed by a key allowed by (e)ACL rules.
// Operation must be one of:
// - 1. ObjectPut;
// - 2. ObjectGet.
-func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error {
+func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error {
err := verifyMessage(req)
if err != nil {
return err
}
- cnr, err := s.cnrSource.Get(cid)
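+ // Read-only requests signed with one of the locally authorized keys
+ // bypass container resolution and APE checks entirely.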
+ isAuthorized, err := s.isAuthorized(req, op)
+ if isAuthorized || err != nil {
+ return err
+ }
+
+ cnr, err := s.cnrSource.Get(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container %s: %w", cid, err)
}
- role, err := roleFromReq(cnr, req)
+ bt, err := parseBearer(rawBearer, cid)
+ if err != nil {
+ return fmt.Errorf("access to operation %s is denied: %w", op, err)
+ }
+
+ role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt)
if err != nil {
return fmt.Errorf("can't get request role: %w", err)
}
- basicACL := cnr.Value.BasicACL()
+ if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil {
+ return apeErr(err)
+ }
+ return nil
+}
- if !basicACL.IsOpAllowed(op, role) {
- return basicACLErr(op)
+func apeErr(err error) error {
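+ // A ChainRouterError is an explicit policy decision and maps to
+ // "access denied"; any other error is reported as internal.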
+ var chRouterErr *checkercore.ChainRouterError
+ if !errors.As(err, &chRouterErr) {
+ errServerInternal := &apistatus.ServerInternal{}
+ apistatus.WriteInternalServerErr(errServerInternal, err)
+ return errServerInternal
+ }
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(err.Error())
+ return errAccessDenied
+}
+
+// Returns true iff the operation is read-only and the request was signed
+// with one of the authorized keys.
+func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
+ if op != acl.OpObjectGet {
+ return false, nil
}
- if !basicACL.Extendable() {
- return nil
+ sign := req.GetSignature()
+ if sign == nil {
+ return false, errors.New("missing signature")
}
- eaclOp := eACLOp(op)
-
- var tableFromBearer bool
- if len(rawBearer) != 0 {
- if !basicACL.AllowedBearerRules(op) {
- s.log.Debug("bearer presented but not allowed by ACL",
- zap.String("cid", cid.EncodeToString()),
- zap.String("op", op.String()),
- )
- } else {
- tableFromBearer = true
+ key := sign.GetKey()
+ for _, currentKey := range *s.authorizedKeys.Load() {
+ if bytes.Equal(currentKey, key) {
+ return true, nil
}
}
+ return false, nil
+}
- var tb eacl.Table
- if tableFromBearer {
- var bt bearer.Token
- if err = bt.Unmarshal(rawBearer); err != nil {
- return eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err))
- }
- if !bearer.ResolveIssuer(bt).Equals(cnr.Value.Owner()) {
- return eACLErr(eaclOp, errBearerWrongOwner)
- }
- if !bt.AssertContainer(cid) {
- return eACLErr(eaclOp, errBearerWrongContainer)
- }
- if !bt.VerifySignature() {
- return eACLErr(eaclOp, errBearerSignature)
- }
-
- tb = bt.EACLTable()
- } else {
- tbCore, err := s.eaclSource.GetEACL(cid)
- if err != nil {
- if client.IsErrEACLNotFound(err) {
- return nil
- }
-
- return fmt.Errorf("get eACL table: %w", err)
- }
-
- tb = *tbCore.Value
+func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) {
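+ // A missing bearer token is not an error: (nil, nil) lets the caller
+ // continue without token-provided APE overrides.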
+ if len(rawBearer) == 0 {
+ return nil, nil
}
- return checkEACL(tb, req.GetSignature().GetKey(), eACLRole(role), eaclOp)
+ bt := new(bearer.Token)
+ if err := bt.Unmarshal(rawBearer); err != nil {
+ return nil, fmt.Errorf("invalid bearer token: %w", err)
+ }
+ if !bt.AssertContainer(cid) {
+ return nil, errBearerWrongContainer
+ }
+ if !bt.VerifySignature() {
+ return nil, errBearerSignature
+ }
+ return bt, nil
}
func verifyMessage(m message) error {
@@ -126,7 +129,7 @@ func verifyMessage(m message) error {
sig := m.GetSignature()
- // TODO(@cthulhu-rider): #1387 use Signature message from NeoFS API to avoid conversion
+ // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion
var sigV2 refs.Signature
sigV2.SetKey(sig.GetKey())
sigV2.SetSign(sig.GetSign())
@@ -168,13 +171,18 @@ func SignMessage(m message, key *ecdsa.PrivateKey) error {
return nil
}
-func roleFromReq(cnr *core.Container, req message) (acl.Role, error) {
+func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role, *keys.PublicKey, error) {
role := acl.RoleOthers
owner := cnr.Value.Owner()
- pub, err := keys.NewPublicKeyFromBytes(req.GetSignature().GetKey(), elliptic.P256())
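+ // With an impersonation bearer token, the effective actor is the token
+ // issuer, so its signing key replaces the request signer's key.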
+ rawKey := req.GetSignature().GetKey()
+ if bt != nil && bt.Impersonate() {
+ rawKey = bt.SigningKeyBytes()
+ }
+
+ pub, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
if err != nil {
- return role, fmt.Errorf("invalid public key: %w", err)
+ return role, nil, fmt.Errorf("invalid public key: %w", err)
}
var reqSigner user.ID
@@ -184,84 +192,5 @@ func roleFromReq(cnr *core.Container, req message) (acl.Role, error) {
role = acl.RoleOwner
}
- return role, nil
-}
-
-func eACLOp(op acl.Op) eacl.Operation {
- switch op {
- case acl.OpObjectGet:
- return eacl.OperationGet
- case acl.OpObjectPut:
- return eacl.OperationPut
- default:
- panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
- }
-}
-
-func eACLRole(role acl.Role) eacl.Role {
- switch role {
- case acl.RoleOwner:
- return eacl.RoleUser
- case acl.RoleOthers:
- return eacl.RoleOthers
- default:
- panic(fmt.Sprintf("unexpected tree service ACL role: %s", role))
- }
-}
-
-var errDENY = errors.New("DENY eACL rule")
-var errNoAllowRules = errors.New("not found allowing rules for the request")
-
-// checkEACL searches for the eACL rules that could be applied to the request
-// (a tuple of a signer key, his FrostFS role and a request operation).
-// It does not filter the request by the filters of the eACL table since tree
-// requests do not contain any "object" information that could be filtered and,
-// therefore, filtering leads to unexpected results.
-// The code was copied with the minor updates from the SDK repo:
-// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28.
-func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error {
- for _, record := range tb.Records() {
- // check type of operation
- if record.Operation() != op {
- continue
- }
-
- // check target
- if !targetMatches(record, role, signer) {
- continue
- }
-
- switch a := record.Action(); a {
- case eacl.ActionAllow:
- return nil
- case eacl.ActionDeny:
- return eACLErr(op, errDENY)
- default:
- return eACLErr(op, fmt.Errorf("unexpected action: %s", a))
- }
- }
-
- return eACLErr(op, errNoAllowRules)
-}
-
-func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool {
- for _, target := range rec.Targets() {
- // check public key match
- if pubs := target.BinaryKeys(); len(pubs) != 0 {
- for _, key := range pubs {
- if bytes.Equal(key, signer) {
- return true
- }
- }
-
- continue
- }
-
- // check target group match
- if role == target.Role() {
- return true
- }
- }
-
- return false
+ return role, pub, nil
}
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index b336e60a2a..ca1e438cc7 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -1,35 +1,73 @@
package tree
import (
+ "context"
"crypto/ecdsa"
"crypto/sha256"
+ "encoding/hex"
"errors"
"testing"
- aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ aclV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
)
+const versionTreeID = "version"
+
type dummyNetmapSource struct {
netmap.Source
}
+type dummySubjectProvider struct {
+ subjects map[util.Uint160]client.SubjectExtended
+}
+
+func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
+ res := s.subjects[addr]
+ return &client.Subject{
+ PrimaryKey: res.PrimaryKey,
+ AdditionalKeys: res.AdditionalKeys,
+ Namespace: res.Namespace,
+ Name: res.Name,
+ KV: res.KV,
+ }, nil
+}
+
+func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
+ res := s.subjects[addr]
+ return &res, nil
+}
+
+type dummyEpochSource struct {
+ epoch uint64
+}
+
+func (s dummyEpochSource) CurrentEpoch() uint64 {
+ return s.epoch
+}
+
type dummyContainerSource map[string]*containercore.Container
-func (s dummyContainerSource) List() ([]cid.ID, error) {
+func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
res := make([]cid.ID, 0, len(s))
var cnr cid.ID
@@ -45,7 +83,7 @@ func (s dummyContainerSource) List() ([]cid.ID, error) {
return res, nil
}
-func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
+func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) {
cnt, ok := s[id.String()]
if !ok {
return nil, errors.New("container not found")
@@ -53,6 +91,10 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
return cnt, nil
}
+func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) {
+ return &containercore.DelInfo{}, nil
+}
+
func testContainer(owner user.ID) container.Container {
var r netmapSDK.ReplicaDescriptor
r.SetNumberOfObjects(1)
@@ -67,6 +109,8 @@ func testContainer(owner user.ID) container.Container {
return cnt
}
+const currentEpoch = 123
+
func TestMessageSign(t *testing.T) {
privs := make([]*keys.PrivateKey, 4)
for i := range privs {
@@ -85,17 +129,30 @@ func TestMessageSign(t *testing.T) {
Value: testContainer(ownerID),
}
+ e := inmemory.NewInMemoryLocalOverrides()
+ e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{
+ Type: engine.Container,
+ Name: cid1.EncodeToString(),
+ }, testChain(privs[0].PublicKey(), privs[1].PublicKey()))
+ frostfsidProvider := dummySubjectProvider{
+ subjects: make(map[util.Uint160]client.SubjectExtended),
+ }
+
s := &Service{
cfg: cfg{
- log: &logger.Logger{Logger: zaptest.NewLogger(t)},
+ log: test.NewLogger(t),
key: &privs[0].PrivateKey,
nmSource: dummyNetmapSource{},
cnrSource: dummyContainerSource{
cid1.String(): cnr,
},
+ frostfsidSubjectProvider: frostfsidProvider,
+ state: dummyEpochSource{epoch: currentEpoch},
},
+ apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
+ s.cfg.authorizedKeys.Store(&[][]byte{})
rawCID1 := make([]byte, sha256.Size)
cid1.Encode(rawCID1)
@@ -104,7 +161,7 @@ func TestMessageSign(t *testing.T) {
ContainerId: rawCID1,
ParentId: 1,
NodeId: 2,
- Meta: []*KeyValue{
+ Meta: []KeyValue{
{Key: "kkk", Value: []byte("vvv")},
},
},
@@ -114,26 +171,26 @@ func TestMessageSign(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRW)
t.Run("missing signature, no panic", func(t *testing.T) {
- require.Error(t, s.verifyClient(req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(req, cid1, nil, op))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
t.Run("invalid CID", func(t *testing.T) {
- require.Error(t, s.verifyClient(req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
cnr.Value.SetBasicACL(acl.Private)
t.Run("extension disabled", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(req, cid2, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
})
t.Run("invalid key", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(req, cid1, nil, op))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
})
t.Run("bearer", func(t *testing.T) {
@@ -146,7 +203,7 @@ func TestMessageSign(t *testing.T) {
t.Run("invalid bearer", func(t *testing.T) {
req.Body.BearerToken = []byte{0xFF}
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer CID", func(t *testing.T) {
@@ -155,7 +212,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer owner", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -163,7 +220,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer signature", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -175,7 +232,113 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bv2.StableMarshal(nil)
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ })
+
+ t.Run("omit override within bt", func(t *testing.T) {
+ t.Run("personated", func(t *testing.T) {
+ bt := testBearerTokenNoOverride()
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override")
+ })
+
+ t.Run("impersonated", func(t *testing.T) {
+ bt := testBearerTokenNoOverride()
+ bt.SetImpersonate(true)
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ })
+ })
+
+ t.Run("invalid override within bearer token", func(t *testing.T) {
+ t.Run("personated", func(t *testing.T) {
+ bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
+ })
+
+ t.Run("impersonated", func(t *testing.T) {
+ bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
+ bt.SetImpersonate(true)
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
+ })
+ })
+
+ t.Run("impersonate", func(t *testing.T) {
+ cnr.Value.SetBasicACL(acl.PublicRWExtended)
+ var bt bearer.Token
+ bt.SetExp(10)
+ bt.SetImpersonate(true)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ })
+
+ t.Run("impersonate but different signer", func(t *testing.T) {
+ var bt bearer.Token
+ bt.SetExp(10)
+ bt.SetImpersonate(true)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
+ require.NoError(t, bt.Sign(privs[1].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ })
+
+ t.Run("impersonate but different issuer", func(t *testing.T) {
+ var bt bearer.Token
+ bt.SetExp(10)
+ bt.SetImpersonate(true)
+
+ differentUserPrivKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ var reqSigner user.ID
+ user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*differentUserPrivKey.PublicKey()))
+
+ bt.ForUser(reqSigner)
+ bt.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid1.EncodeToString(),
+ },
+ Chains: []ape.Chain{},
+ })
+ require.NoError(t, bt.Sign(privs[0].PrivateKey))
+ req.Body.BearerToken = bt.Marshal()
+
+ require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -185,60 +348,95 @@ func TestMessageSign(t *testing.T) {
t.Run("put and get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("only get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[2].PrivateKey))
- require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("none", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[3].PrivateKey))
- require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
})
}
func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token {
- tgtGet := eaclSDK.NewTarget()
- tgtGet.SetRole(eaclSDK.RoleUnknown)
- tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()})
-
- rGet := eaclSDK.NewRecord()
- rGet.SetAction(eaclSDK.ActionAllow)
- rGet.SetOperation(eaclSDK.OperationGet)
- rGet.SetTargets(*tgtGet)
-
- tgtPut := eaclSDK.NewTarget()
- tgtPut.SetRole(eaclSDK.RoleUnknown)
- tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()})
-
- rPut := eaclSDK.NewRecord()
- rPut.SetAction(eaclSDK.ActionAllow)
- rPut.SetOperation(eaclSDK.OperationPut)
- rPut.SetTargets(*tgtPut)
-
- tb := eaclSDK.NewTable()
- tb.AddRecord(rGet)
- tb.AddRecord(rPut)
-
- tgt := eaclSDK.NewTarget()
- tgt.SetRole(eaclSDK.RoleOthers)
-
- for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} {
- r := eaclSDK.NewRecord()
- r.SetAction(eaclSDK.ActionDeny)
- r.SetTargets(*tgt)
- r.SetOperation(op)
- tb.AddRecord(r)
- }
-
- tb.SetCID(cid)
-
var b bearer.Token
- b.SetEACLTable(*tb)
+ b.SetExp(currentEpoch + 1)
+ b.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid.EncodeToString(),
+ },
+ Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
+ })
return b
}
+
+func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token {
+ var b bearer.Token
+ b.SetExp(currentEpoch + 1)
+ b.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ },
+ Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
+ })
+
+ return b
+}
+
+func testBearerTokenNoOverride() bearer.Token {
+ var b bearer.Token
+ b.SetExp(currentEpoch + 1)
+ return b
+}
+
+func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
+ ruleGet := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodGetObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forGet.Bytes()),
+ },
+ },
+ }
+ rulePut := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodPutObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ },
+ }
+
+ return &chain.Chain{
+ Rules: []chain.Rule{
+ ruleGet,
+ rulePut,
+ },
+ }
+}
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index a1bab61de5..1480bff928 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -2,22 +2,37 @@ package tree
import (
"context"
+ "crypto/ecdsa"
"crypto/sha256"
+ "crypto/tls"
"errors"
"fmt"
"io"
"math"
"math/rand"
"sync"
+ "sync/atomic"
+ "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
+ tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
"google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -31,7 +46,7 @@ const defaultSyncWorkerCount = 20
// tree IDs from the other container nodes. Returns ErrNotInContainer if the node
// is not included in the container.
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
- nodes, pos, err := s.getContainerNodes(cid)
+ nodes, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -40,11 +55,6 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
return ErrNotInContainer
}
- var d pilorama.CIDDescriptor
- d.CID = cid
- d.Position = pos
- d.Size = len(nodes)
-
nodes = randomizeNodeOrder(nodes, pos)
if len(nodes) == 0 {
return nil
@@ -68,8 +78,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
var treesToSync []string
var outErr error
- err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool {
- resp, outErr = c.TreeList(ctx, req)
+ err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(fCtx, req)
if outErr != nil {
return false
}
@@ -87,18 +97,18 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
}
for _, tid := range treesToSync {
- h, err := s.forest.TreeLastSyncHeight(d.CID, tid)
+ h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- s.log.Warn("could not get last synchronized height for a tree",
- zap.Stringer("cid", d.CID),
+ s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
+ zap.Stringer("cid", cid),
zap.String("tree", tid))
continue
}
- newHeight := s.synchronizeTree(ctx, d, h, tid, nodes)
+ newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
- if err := s.forest.TreeUpdateLastSyncHeight(d.CID, tid, newHeight); err != nil {
- s.log.Warn("could not update last synchronized height for a tree",
- zap.Stringer("cid", d.CID),
+ if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
+ s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
+ zap.Stringer("cid", cid),
zap.String("tree", tid))
}
}
@@ -109,7 +119,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
// SynchronizeTree tries to synchronize log starting from the last stored height.
func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error {
- nodes, pos, err := s.getContainerNodes(cid)
+ nodes, pos, err := s.getContainerNodes(ctx, cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -118,113 +128,276 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
return ErrNotInContainer
}
- var d pilorama.CIDDescriptor
- d.CID = cid
- d.Position = pos
- d.Size = len(nodes)
-
nodes = randomizeNodeOrder(nodes, pos)
if len(nodes) == 0 {
return nil
}
- s.synchronizeTree(ctx, d, 0, treeID, nodes)
+ s.synchronizeTree(ctx, cid, 0, treeID, nodes)
return nil
}
-func (s *Service) synchronizeTree(ctx context.Context, d pilorama.CIDDescriptor, from uint64,
- treeID string, nodes []netmapSDK.NodeInfo) uint64 {
- s.log.Debug("synchronize tree",
- zap.Stringer("cid", d.CID),
- zap.String("tree", treeID),
- zap.Uint64("from", from))
+// mergeOperationStreams merge-sorts per-node operation streams into a single
+// stream and returns the minimum last-streamed height across all streams.
+func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
+ defer close(merged)
- newHeight := uint64(math.MaxUint64)
- for _, n := range nodes {
- height := from
- n.IterateNetworkEndpoints(func(addr string) bool {
- var a network.Address
- if err := a.FromString(addr); err != nil {
- return false
- }
+ // Merging different node streams shuffles incoming operations like this:
+ //
+ // x - operation from stream A
+ // o - operation from stream B
+ //
+ // --o---o--x--x--x--o---x--x------> t
+ // ^
+ // If all ops have been applied successfully, the next sync must resume from
+ // the height of the last operation in stream B, whose last operation has the
+ // lowest height. This height is stored in minStreamedLastHeight.
+ var minStreamedLastHeight uint64 = math.MaxUint64
- cc, err := grpc.DialContext(ctx, a.URIAddr(), grpc.WithTransportCredentials(insecure.NewCredentials()))
- if err != nil {
- // Failed to connect, try the next address.
- return false
- }
- defer cc.Close()
-
- treeClient := NewTreeServiceClient(cc)
- for {
- h, err := s.synchronizeSingle(ctx, d, treeID, height, treeClient)
- if height < h {
- height = h
- }
- if err != nil || h <= height {
- // Error with the response, try the next node.
- return true
- }
- }
- })
- if height <= from { // do not increase starting height on fail
- newHeight = from
- } else if height < newHeight { // take minimum across all clients
- newHeight = height
+ ms := make([]*pilorama.Move, len(streams))
+ for i := range streams {
+ select {
+ case ms[i] = <-streams[i]:
+ case <-ctx.Done():
+ return minStreamedLastHeight
}
}
- if newHeight == math.MaxUint64 {
- newHeight = from
- }
- return newHeight
-}
-
-func (s *Service) synchronizeSingle(ctx context.Context, d pilorama.CIDDescriptor, treeID string, height uint64, treeClient TreeServiceClient) (uint64, error) {
- rawCID := make([]byte, sha256.Size)
- d.CID.Encode(rawCID)
for {
- newHeight := height
- req := &GetOpLogRequest{
- Body: &GetOpLogRequest_Body{
- ContainerId: rawCID,
- TreeId: treeID,
- Height: newHeight,
- },
- }
- if err := SignMessage(req, s.key); err != nil {
- return newHeight, err
+ var minTimeMoveTime uint64 = math.MaxUint64
+ minTimeMoveIndex := -1
+ for i, m := range ms {
+ if m != nil && minTimeMoveTime > m.Time {
+ minTimeMoveTime = m.Time
+ minTimeMoveIndex = i
+ }
}
- c, err := treeClient.GetOpLog(ctx, req)
- if err != nil {
- return newHeight, fmt.Errorf("can't initialize client: %w", err)
+ if minTimeMoveIndex == -1 {
+ break
}
- res, err := c.Recv()
- for ; err == nil; res, err = c.Recv() {
- lm := res.GetBody().GetOperation()
- m := &pilorama.Move{
- Parent: lm.ParentId,
- Child: lm.ChildId,
- }
- if err := m.Meta.FromBytes(lm.Meta); err != nil {
- return newHeight, err
- }
- if err := s.forest.TreeApply(d, treeID, m, true); err != nil {
- return newHeight, err
- }
- if m.Time > newHeight {
- newHeight = m.Time + 1
- } else {
- newHeight++
- }
+ select {
+ case merged <- ms[minTimeMoveIndex]:
+ case <-ctx.Done():
+ return minStreamedLastHeight
}
- if height == newHeight || err != nil && !errors.Is(err, io.EOF) {
- return newHeight, err
+ height := ms[minTimeMoveIndex].Time
+ if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
+ minStreamedLastHeight = min(minStreamedLastHeight, height)
}
- height = newHeight
}
+
+ return minStreamedLastHeight
+}
+
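+// applyOperationStream consumes the merged operation stream, deduplicates
+// operations by height, and applies them to the forest in batches of
+// syncBatchSize. It returns math.MaxUint64 on full success, or the height
+// of the first operation in the batch that failed to apply.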
+func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string,
+ operationStream <-chan *pilorama.Move,
+) (uint64, error) {
+ var prev *pilorama.Move
+ var batch []*pilorama.Move
+ for m := range operationStream {
+ // Skip an already applied operation: the merged stream is ordered by
+ // time, so duplicates of the same operation arrive back to back.
+ if prev != nil && prev.Time == m.Time {
+ continue
+ }
+ prev = m
+ batch = append(batch, m)
+
+ if len(batch) == s.syncBatchSize {
+ if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
+ return batch[0].Time, err
+ }
+ batch = batch[:0]
+ }
+ }
+ if len(batch) > 0 {
+ if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
+ return batch[0].Time, err
+ }
+ }
+ return math.MaxUint64, nil
+}
+
+func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
+ height uint64, cc *grpc.ClientConn, opsCh chan<- *pilorama.Move,
+) error {
+ treeClient := NewTreeServiceClient(cc)
+
+ rawCID := make([]byte, sha256.Size)
+ cid.Encode(rawCID)
+
+ req := &GetOpLogRequest{
+ Body: &GetOpLogRequest_Body{
+ ContainerId: rawCID,
+ TreeId: treeID,
+ Height: height,
+ },
+ }
+ if err := SignMessage(req, s.key); err != nil {
+ return err
+ }
+
+ c, err := treeClient.GetOpLog(ctx, req)
+ if err != nil {
+ return fmt.Errorf("can't initialize client: %w", err)
+ }
+ res, err := c.Recv()
+ for ; err == nil; res, err = c.Recv() {
+ lm := res.GetBody().GetOperation()
+ m := &pilorama.Move{
+ Parent: lm.GetParentId(),
+ Child: lm.GetChildId(),
+ }
+ if err := m.FromBytes(lm.GetMeta()); err != nil {
+ return err
+ }
+ select {
+ case opsCh <- m:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ if !errors.Is(err, io.EOF) {
+ return err
+ }
+ return nil
+}
+
+// synchronizeTree synchronizes operations by fetching them from different nodes.
+// Each available node streams its operations into a separate channel; these streams
+// are merged into one stream ordered by operation time. This makes it possible to
+// skip already applied operations while keeping good batching.
+// The method returns the height from which the service should start syncing next time.
+func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
+ treeID string, nodes []netmapSDK.NodeInfo,
+) uint64 {
+ s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
+
+ errGroup, egCtx := errgroup.WithContext(ctx)
+ const workersCount = 1024
+ errGroup.SetLimit(workersCount)
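+ // Workers: one goroutine merges streams, one applies operations, and one
+ // reads the operation log from each container node.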
+
+ nodeOperationStreams := make([]chan *pilorama.Move, len(nodes))
+ for i := range nodeOperationStreams {
+ nodeOperationStreams[i] = make(chan *pilorama.Move)
+ }
+ merged := make(chan *pilorama.Move)
+ var minStreamedLastHeight uint64
+ errGroup.Go(func() error {
+ minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged)
+ return nil
+ })
+ var minUnappliedHeight uint64
+ errGroup.Go(func() error {
+ var err error
+ minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged)
+ return err
+ })
+
+ var allNodesSynced atomic.Bool
+ allNodesSynced.Store(true)
+
+ for i, n := range nodes {
+ errGroup.Go(func() error {
+ var nodeSynced bool
+ for addr := range n.NetworkEndpoints() {
+ var a network.Address
+ if err := a.FromString(addr); err != nil {
+ s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ continue
+ }
+
+ cc, err := dialTreeService(ctx, a, s.key, s.ds)
+ if err != nil {
+ s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+ continue
+ }
+
+ err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
+ if err != nil {
+ s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
+ }
+ nodeSynced = err == nil
+ _ = cc.Close()
+ break
+ }
+ close(nodeOperationStreams[i])
+ if !nodeSynced {
+ allNodesSynced.Store(false)
+ }
+ return nil
+ })
+ }
+ if err := errGroup.Wait(); err != nil {
+ allNodesSynced.Store(false)
+ s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
+ }
+
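+ // Compute the resume height: cap at the first unapplied height so failed
+ // batches are retried, otherwise advance past the last streamed operation.
+ // If any node failed to sync, keep the previous height for the next round.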
+ newHeight := minStreamedLastHeight
+ if newHeight > minUnappliedHeight {
+ newHeight = minUnappliedHeight
+ } else {
+ newHeight++
+ }
+ if allNodesSynced.Load() {
+ return newHeight
+ }
+ return from
+}
+
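+// dialTreeService opens a gRPC connection to the given address and verifies it
+// with a signed Healthcheck request before handing it to the caller.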
+func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) {
+ cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer()))
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ defer cancel()
+
+ req := &HealthcheckRequest{
+ Body: &HealthcheckRequest_Body{},
+ }
+ if err := SignMessage(req, key); err != nil {
+ return nil, err
+ }
+
+ // Perform a lightweight request to verify that the connection works.
+ if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
+ _ = cc.Close()
+ return nil, err
+ }
+ return cc, nil
+}
+
+func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ host, isTLS, err := client.ParseURI(a.URIAddr())
+ if err != nil {
+ return nil, err
+ }
+
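+ // Pick transport credentials based on the scheme of the parsed URI.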
+ creds := insecure.NewCredentials()
+ if isTLS {
+ creds = credentials.NewTLS(&tls.Config{})
+ }
+
+ defaultOpts := []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
+ metrics.NewUnaryClientInterceptor(),
+ tracing_grpc.NewUnaryClientInterceptor(),
+ tagging.NewUnaryClientInterceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
+ metrics.NewStreamClientInterceptor(),
+ tracing_grpc.NewStreamClientInterceptor(),
+ tagging.NewStreamClientInterceptor(),
+ ),
+ grpc.WithTransportCredentials(creds),
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
+ grpc.WithDisableServiceConfig(),
+ }
+
+ return grpc.NewClient(host, append(defaultOpts, opts...)...)
}
// ErrAlreadySyncing is returned when a service synchronization has already
@@ -246,6 +419,9 @@ func (s *Service) SynchronizeAll() error {
return ErrShuttingDown
default:
}
+ if s.syncDisabled {
+ return nil
+ }
select {
case s.syncChan <- struct{}{}:
@@ -263,92 +439,128 @@ func (s *Service) syncLoop(ctx context.Context) {
case <-ctx.Done():
return
case <-s.syncChan:
- s.log.Debug("syncing trees...")
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
+ s.log.Info(ctx, logs.TreeSyncingTrees)
- cnrs, err := s.cfg.cnrSource.List()
+ start := time.Now()
+
+ cnrs, err := s.cnrSource.List(ctx)
if err != nil {
- s.log.Error("could not fetch containers", zap.Error(err))
- continue
+ s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
+ s.metrics.AddSyncDuration(time.Since(start), false)
+ span.End()
+ break
}
- newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
- cnrsToSync := make([]cid.ID, 0, len(cnrs))
+ newMap, cnrsToSync := s.containersToSync(ctx, cnrs)
- var removed []cid.ID
- for _, cnr := range cnrs {
- _, pos, err := s.getContainerNodes(cnr)
- if err != nil {
- s.log.Error("could not calculate container nodes",
- zap.Stringer("cid", cnr),
- zap.Error(err))
- continue
- }
+ s.syncContainers(ctx, cnrsToSync)
- if pos < 0 {
- // node is not included in the container.
- continue
- }
+ s.removeContainers(ctx, newMap)
- newMap[cnr] = struct{}{}
- cnrsToSync = append(cnrsToSync, cnr)
+ s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)
+
+ s.metrics.AddSyncDuration(time.Since(start), true)
+ span.End()
+ }
+ s.initialSyncDone.Store(true)
+ }
+}
+
+func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.syncContainers")
+ defer span.End()
+
+ // sync new containers
+ var wg sync.WaitGroup
+ for _, cnr := range cnrs {
+ wg.Add(1)
+
+ err := s.syncPool.Submit(func() {
+ defer wg.Done()
+ s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
+
+ err := s.synchronizeAllTrees(ctx, cnr)
+ if err != nil {
+ s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
+ return
}
- // sync new containers
- var wg sync.WaitGroup
- for _, cnr := range cnrsToSync {
- wg.Add(1)
- cnr := cnr
- err := s.syncPool.Submit(func() {
- defer wg.Done()
- s.log.Debug("syncing container trees...", zap.Stringer("cid", cnr))
-
- err := s.synchronizeAllTrees(ctx, cnr)
- if err != nil {
- s.log.Error("could not sync trees", zap.Stringer("cid", cnr), zap.Error(err))
- return
- }
-
- s.log.Debug("container trees have been synced", zap.Stringer("cid", cnr))
- })
- if err != nil {
- wg.Done()
- s.log.Error("could not query trees for synchronization",
- zap.Stringer("cid", cnr),
- zap.Error(err))
- if errors.Is(err, ants.ErrPoolClosed) {
- return
- }
- }
+ s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
+ })
+ if err != nil {
+ wg.Done()
+ s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
+ zap.Stringer("cid", cnr),
+ zap.Error(err))
+ if errors.Is(err, ants.ErrPoolClosed) {
+ return
}
- wg.Wait()
-
- s.cnrMapMtx.Lock()
- for cnr := range s.cnrMap {
- if _, ok := newMap[cnr]; ok {
- continue
- }
- removed = append(removed, cnr)
- }
- for i := range removed {
- delete(s.cnrMap, removed[i])
- }
- s.cnrMapMtx.Unlock()
-
- for _, cnr := range removed {
- s.log.Debug("removing redundant trees...", zap.Stringer("cid", cnr))
-
- err = s.DropTree(ctx, cnr, "")
- if err != nil {
- s.log.Error("could not remove redundant tree",
- zap.Stringer("cid", cnr),
- zap.Error(err))
- continue
- }
- }
-
- s.log.Debug("trees have been synchronized")
}
}
+ wg.Wait()
+}
+
+func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID]struct{}) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.removeContainers")
+ defer span.End()
+
+ s.cnrMapMtx.Lock()
+ defer s.cnrMapMtx.Unlock()
+
+ var removed []cid.ID
+ for cnr := range s.cnrMap {
+ if _, ok := newContainers[cnr]; ok {
+ continue
+ }
+
+ existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr)
+ if err != nil {
+ s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
+ zap.Stringer("cid", cnr),
+ zap.Error(err))
+ } else if existed {
+ removed = append(removed, cnr)
+ }
+ }
+ for i := range removed {
+ delete(s.cnrMap, removed[i])
+ }
+
+ for _, cnr := range removed {
+ s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
+
+ err := s.DropTree(ctx, cnr, "")
+ if err != nil {
+ s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
+ zap.Stringer("cid", cnr),
+ zap.Error(err))
+ }
+ }
+}
+
+func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
+ newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
+ cnrsToSync := make([]cid.ID, 0, len(cnrs))
+
+ for _, cnr := range cnrs {
+ _, pos, err := s.getContainerNodes(ctx, cnr)
+ if err != nil {
+ s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes,
+ zap.Stringer("cid", cnr),
+ zap.Error(err))
+ continue
+ }
+
+ if pos < 0 {
+ // node is not included in the container.
+ continue
+ }
+
+ newMap[cnr] = struct{}{}
+ cnrsToSync = append(cnrsToSync, cnr)
+ }
+ return newMap, cnrsToSync
}
// randomizeNodeOrder shuffles nodes and removes the node at the `pos` index.
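createConnection above puts its defaults ahead of any caller-supplied options, so a caller can override the credentials or call options while the interceptor chains stay in place. A standalone sketch of the same layering with plain grpc-go (no FrostFS-specific interceptors; `dial` is a hypothetical name):

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dial appends caller options after the defaults; for scalar options the
// last write wins, so callers may override e.g. transport credentials.
func dial(host string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
	defaultOpts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
		grpc.WithDisableServiceConfig(),
	}
	return grpc.NewClient(host, append(defaultOpts, opts...)...)
}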
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
new file mode 100644
index 0000000000..87d419408f
--- /dev/null
+++ b/pkg/services/tree/sync_test.go
@@ -0,0 +1,79 @@
+package tree
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_mergeOperationStreams(t *testing.T) {
+ tests := []struct {
+ name string
+ opTimes [][]uint64
+ wantValues []uint64
+ wantMinHeight uint64
+ }{
+ {
+ name: "1",
+ opTimes: [][]uint64{
+ {250, 251, 255},
+ {252, 253, 254, 256, 257},
+ },
+ wantValues: []uint64{250, 251, 252, 253, 254, 255, 256, 257},
+ wantMinHeight: 255,
+ },
+ {
+ name: "2",
+ opTimes: [][]uint64{
+ {250, 251, 255, 259},
+ {252, 253, 254, 256, 257},
+ },
+ wantValues: []uint64{250, 251, 252, 253, 254, 255, 256, 257, 259},
+ wantMinHeight: 257,
+ },
+ {
+ name: "3",
+ opTimes: [][]uint64{
+ {250, 251, 255},
+ {249, 250, 251, 253, 254, 256, 257},
+ },
+ wantValues: []uint64{249, 250, 250, 251, 251, 253, 254, 255, 256, 257},
+ wantMinHeight: 255,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ nodeOpChans := make([]chan *pilorama.Move, len(tt.opTimes))
+ for i := range nodeOpChans {
+ nodeOpChans[i] = make(chan *pilorama.Move)
+ }
+
+ // generate and put values to all chans
+ for i, ch := range nodeOpChans {
+ go func() {
+ for _, tm := range tt.opTimes[i] {
+ op := &pilorama.Move{}
+ op.Time = tm
+ ch <- op
+ }
+ close(nodeOpChans[i])
+ }()
+ }
+
+ merged := make(chan *pilorama.Move, 1)
+ min := make(chan uint64)
+ go func() {
+ min <- mergeOperationStreams(context.Background(), nodeOpChans, merged)
+ }()
+
+ var res []uint64
+ for op := range merged {
+ res = append(res, op.Time)
+ }
+ require.Equal(t, tt.wantValues, res)
+ require.Equal(t, tt.wantMinHeight, <-min)
+ })
+ }
+}
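The three cases above pin down two properties of mergeOperationStreams: the merged channel emits operations globally sorted by Time (duplicates preserved), and the returned height is the smallest last-emitted Time across the input streams, i.e. the height up to which every stream has been fully drained. An illustrative channel-based k-way merge with the same contract (simplified to uint64, non-empty inputs assumed; not the repository's implementation):

func mergeSorted(ins []chan uint64, out chan<- uint64) uint64 {
	heads := make([]uint64, len(ins)) // current head of each stream
	alive := make([]bool, len(ins))
	last := make([]uint64, len(ins)) // last value emitted per stream
	for i := range ins {
		heads[i], alive[i] = <-ins[i]
	}
	for {
		best := -1
		for i := range ins {
			if alive[i] && (best == -1 || heads[i] < heads[best]) {
				best = i
			}
		}
		if best == -1 {
			break // all streams drained
		}
		out <- heads[best]
		last[best] = heads[best]
		heads[best], alive[best] = <-ins[best]
	}
	close(out)
	minLast := last[0]
	for _, v := range last[1:] {
		if v < minLast {
			minLast = v
		}
	}
	return minLast
}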
diff --git a/pkg/services/tree/types.pb.go b/pkg/services/tree/types.pb.go
deleted file mode 100644
index f3f64180a9..0000000000
--- a/pkg/services/tree/types.pb.go
+++ /dev/null
@@ -1,320 +0,0 @@
-//*
-// Auxiliary structures to use with tree service.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.21.12
-// source: pkg/services/tree/types.proto
-
-package tree
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// KeyValue represents key-value pair attached to an object.
-type KeyValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Attribute name.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Attribute value.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (x *KeyValue) Reset() {
- *x = KeyValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *KeyValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*KeyValue) ProtoMessage() {}
-
-func (x *KeyValue) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
-func (*KeyValue) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *KeyValue) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-
-func (x *KeyValue) GetValue() []byte {
- if x != nil {
- return x.Value
- }
- return nil
-}
-
-// LogMove represents log-entry for a single move operation.
-type LogMove struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the parent node.
- ParentId uint64 `protobuf:"varint,1,opt,name=parent_id,json=parentID,proto3" json:"parent_id,omitempty"`
- // Node meta information, including operation timestamp.
- Meta []byte `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"`
- // ID of the node to move.
- ChildId uint64 `protobuf:"varint,3,opt,name=child_id,json=childID,proto3" json:"child_id,omitempty"`
-}
-
-func (x *LogMove) Reset() {
- *x = LogMove{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_types_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *LogMove) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*LogMove) ProtoMessage() {}
-
-func (x *LogMove) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_types_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use LogMove.ProtoReflect.Descriptor instead.
-func (*LogMove) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *LogMove) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-func (x *LogMove) GetMeta() []byte {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *LogMove) GetChildId() uint64 {
- if x != nil {
- return x.ChildId
- }
- return 0
-}
-
-// Signature of a message.
-type Signature struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Serialized public key as defined in FrostFS API.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Signature of a message body.
- Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
-}
-
-func (x *Signature) Reset() {
- *x = Signature{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_types_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Signature) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Signature) ProtoMessage() {}
-
-func (x *Signature) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_types_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
-func (*Signature) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-
-var File_pkg_services_tree_types_proto protoreflect.FileDescriptor
-
-var file_pkg_services_tree_types_proto_rawDesc = []byte{
- 0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74,
- 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x07, 0x4c, 0x6f, 0x67,
- 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
- 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x44,
- 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x2e,
- 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75,
- 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66,
- 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_tree_types_proto_rawDescOnce sync.Once
- file_pkg_services_tree_types_proto_rawDescData = file_pkg_services_tree_types_proto_rawDesc
-)
-
-func file_pkg_services_tree_types_proto_rawDescGZIP() []byte {
- file_pkg_services_tree_types_proto_rawDescOnce.Do(func() {
- file_pkg_services_tree_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_types_proto_rawDescData)
- })
- return file_pkg_services_tree_types_proto_rawDescData
-}
-
-var file_pkg_services_tree_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_pkg_services_tree_types_proto_goTypes = []interface{}{
- (*KeyValue)(nil), // 0: tree.KeyValue
- (*LogMove)(nil), // 1: tree.LogMove
- (*Signature)(nil), // 2: tree.Signature
-}
-var file_pkg_services_tree_types_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_tree_types_proto_init() }
-func file_pkg_services_tree_types_proto_init() {
- if File_pkg_services_tree_types_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_tree_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeyValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LogMove); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Signature); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_tree_types_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_pkg_services_tree_types_proto_goTypes,
- DependencyIndexes: file_pkg_services_tree_types_proto_depIdxs,
- MessageInfos: file_pkg_services_tree_types_proto_msgTypes,
- }.Build()
- File_pkg_services_tree_types_proto = out.File
- file_pkg_services_tree_types_proto_rawDesc = nil
- file_pkg_services_tree_types_proto_goTypes = nil
- file_pkg_services_tree_types_proto_depIdxs = nil
-}
diff --git a/pkg/services/tree/types.proto b/pkg/services/tree/types.proto
index 23d73b9ad1..f122c7cf4c 100644
--- a/pkg/services/tree/types.proto
+++ b/pkg/services/tree/types.proto
@@ -10,25 +10,25 @@ option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tre
// KeyValue represents key-value pair attached to an object.
message KeyValue {
// Attribute name.
- string key = 1 [json_name = "key"];
+ string key = 1 [ json_name = "key" ];
// Attribute value.
- bytes value = 2 [json_name = "value"];
+ bytes value = 2 [ json_name = "value" ];
}
// LogMove represents log-entry for a single move operation.
message LogMove {
// ID of the parent node.
- uint64 parent_id = 1 [json_name = "parentID"];
+ uint64 parent_id = 1 [ json_name = "parentID" ];
// Node meta information, including operation timestamp.
- bytes meta = 2 [json_name = "meta"];
+ bytes meta = 2 [ json_name = "meta" ];
// ID of the node to move.
- uint64 child_id = 3 [json_name = "childID"];
+ uint64 child_id = 3 [ json_name = "childID" ];
}
// Signature of a message.
message Signature {
// Serialized public key as defined in FrostFS API.
- bytes key = 1 [json_name = "key"];
+ bytes key = 1 [ json_name = "key" ];
// Signature of a message body.
- bytes sign = 2 [json_name = "signature"];
+ bytes sign = 2 [ json_name = "signature" ];
}
diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go
index 8e3c6a0266..2827b10a94 100644
--- a/pkg/services/tree/types_frostfs.pb.go
+++ b/pkg/services/tree/types_frostfs.pb.go
@@ -2,96 +2,623 @@
package tree
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type KeyValue struct {
+ Key string `json:"key"`
+ Value []byte `json:"value"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*KeyValue)(nil)
+ _ encoding.ProtoUnmarshaler = (*KeyValue)(nil)
+ _ json.Marshaler = (*KeyValue)(nil)
+ _ json.Unmarshaler = (*KeyValue)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *KeyValue) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.StringSize(1, x.Key)
size += proto.BytesSize(2, x.Value)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *KeyValue) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.StringMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Value)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *KeyValue) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *KeyValue) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendString(1, x.Key)
+ }
+ if len(x.Value) != 0 {
+ mm.AppendBytes(2, x.Value)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *KeyValue) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "KeyValue")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Value
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Value")
+ }
+ x.Value = data
+ }
+ }
+ return nil
+}
+func (x *KeyValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+func (x *KeyValue) SetKey(v string) {
+ x.Key = v
+}
+func (x *KeyValue) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+func (x *KeyValue) SetValue(v []byte) {
+ x.Value = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *KeyValue) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ out.String(x.Key)
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"value\":"
+ out.RawString(prefix)
+ if x.Value != nil {
+ out.Base64Bytes(x.Value)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *KeyValue) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f string
+ f = in.String()
+ x.Key = f
+ }
+ case "value":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Value = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type LogMove struct {
+ ParentId uint64 `json:"parentID"`
+ Meta []byte `json:"meta"`
+ ChildId uint64 `json:"childID"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*LogMove)(nil)
+ _ encoding.ProtoUnmarshaler = (*LogMove)(nil)
+ _ json.Marshaler = (*LogMove)(nil)
+ _ json.Unmarshaler = (*LogMove)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *LogMove) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.UInt64Size(1, x.ParentId)
size += proto.BytesSize(2, x.Meta)
size += proto.UInt64Size(3, x.ChildId)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *LogMove) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.ParentId)
- offset += proto.BytesMarshal(2, buf[offset:], x.Meta)
- offset += proto.UInt64Marshal(3, buf[offset:], x.ChildId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *LogMove) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *LogMove) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(1, x.ParentId)
+ }
+ if len(x.Meta) != 0 {
+ mm.AppendBytes(2, x.Meta)
+ }
+ if x.ChildId != 0 {
+ mm.AppendUint64(3, x.ChildId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *LogMove) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "LogMove")
+ }
+ switch fc.FieldNum {
+ case 1: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ case 2: // Meta
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = data
+ case 3: // ChildId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChildId")
+ }
+ x.ChildId = data
+ }
+ }
+ return nil
+}
+func (x *LogMove) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *LogMove) SetParentId(v uint64) {
+ x.ParentId = v
+}
+func (x *LogMove) GetMeta() []byte {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *LogMove) SetMeta(v []byte) {
+ x.Meta = v
+}
+func (x *LogMove) GetChildId() uint64 {
+ if x != nil {
+ return x.ChildId
+ }
+ return 0
+}
+func (x *LogMove) SetChildId(v uint64) {
+ x.ChildId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *LogMove) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"parentID\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
+ out.RawByte('"')
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"meta\":"
+ out.RawString(prefix)
+ if x.Meta != nil {
+ out.Base64Bytes(x.Meta)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"childID\":"
+ out.RawString(prefix)
+ out.RawByte('"')
+ out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ChildId, 10)
+ out.RawByte('"')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *LogMove) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "parentID":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.ParentId = f
+ }
+ case "meta":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Meta = f
+ }
+ case "childID":
+ {
+ var f uint64
+ r := in.JsonNumber()
+ n := r.String()
+ v, err := strconv.ParseUint(n, 10, 64)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ pv := uint64(v)
+ f = pv
+ x.ChildId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type Signature struct {
+ Key []byte `json:"key"`
+ Sign []byte `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Signature)(nil)
+ _ encoding.ProtoUnmarshaler = (*Signature)(nil)
+ _ json.Marshaler = (*Signature)(nil)
+ _ json.Unmarshaler = (*Signature)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
func (x *Signature) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
size += proto.BytesSize(1, x.Key)
size += proto.BytesSize(2, x.Sign)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Signature) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Signature) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if len(x.Sign) != 0 {
+ mm.AppendBytes(2, x.Sign)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Signature")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Sign
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Sign")
+ }
+ x.Sign = data
+ }
+ }
+ return nil
+}
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *Signature) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+func (x *Signature) SetSign(v []byte) {
+ x.Sign = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Signature) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ first := true
+ out.RawByte('{')
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"key\":"
+ out.RawString(prefix)
+ if x.Key != nil {
+ out.Base64Bytes(x.Key)
+ } else {
+ out.String("")
+ }
+ }
+ {
+ if !first {
+ out.RawByte(',')
+ } else {
+ first = false
+ }
+ const prefix string = "\"signature\":"
+ out.RawString(prefix)
+ if x.Sign != nil {
+ out.Base64Bytes(x.Sign)
+ } else {
+ out.String("")
+ }
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Signature) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Key = f
+ }
+ case "signature":
+ {
+ var f []byte
+ {
+ tmp := in.Bytes()
+ if len(tmp) == 0 {
+ tmp = nil
+ }
+ f = tmp
+ }
+ x.Sign = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
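The regenerated types replace protoc-gen-go reflection with easyproto-based codecs; a round-trip then uses only the methods defined above (sketch):

kv := &KeyValue{Key: "FileName", Value: []byte("a.txt")}
data := kv.MarshalProtobuf(nil) // stable field order, pooled marshaler

var parsed KeyValue
if err := parsed.UnmarshalProtobuf(data); err != nil {
	// malformed input
}

js, _ := kv.MarshalJSON() // {"key":"FileName","value":"YS50eHQ="} (bytes are base64-encoded)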
diff --git a/pkg/services/util/response/client_stream.go b/pkg/services/util/response/client_stream.go
deleted file mode 100644
index f167f005a4..0000000000
--- a/pkg/services/util/response/client_stream.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package response
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
-)
-
-// ClientMessageStreamer represents client-side message streamer
-// that sets meta values to the response.
-type ClientMessageStreamer struct {
- cfg *cfg
-
- send util.RequestMessageWriter
-
- close util.ClientStreamCloser
-}
-
-// Send calls send method of internal streamer.
-func (s *ClientMessageStreamer) Send(req any) error {
- if err := s.send(req); err != nil {
- return fmt.Errorf("(%T) could not send the request: %w", s, err)
- }
- return nil
-}
-
-// CloseAndRecv closes internal stream, receivers the response,
-// sets meta values and returns the result.
-func (s *ClientMessageStreamer) CloseAndRecv() (util.ResponseMessage, error) {
- resp, err := s.close()
- if err != nil {
- return nil, fmt.Errorf("(%T) could not close stream and receive response: %w", s, err)
- }
-
- setMeta(resp, s.cfg)
-
- return resp, nil
-}
-
-// CreateRequestStreamer wraps stream methods and returns ClientMessageStreamer instance.
-func (s *Service) CreateRequestStreamer(sender util.RequestMessageWriter, closer util.ClientStreamCloser) *ClientMessageStreamer {
- return &ClientMessageStreamer{
- cfg: s.cfg,
- send: sender,
- close: closer,
- }
-}
diff --git a/pkg/services/util/response/server_stream.go b/pkg/services/util/response/server_stream.go
deleted file mode 100644
index 8a19fc4e78..0000000000
--- a/pkg/services/util/response/server_stream.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package response
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
-)
-
-// ServerMessageStreamer represents server-side message streamer
-// that sets meta values to all response messages.
-type ServerMessageStreamer struct {
- cfg *cfg
-
- recv util.ResponseMessageReader
-}
-
-// Recv calls Recv method of internal streamer, sets response meta
-// values and returns the response.
-func (s *ServerMessageStreamer) Recv() (util.ResponseMessage, error) {
- m, err := s.recv()
- if err != nil {
- return nil, fmt.Errorf("could not receive response message for signing: %w", err)
- }
-
- setMeta(m, s.cfg)
-
- return m, nil
-}
-
-// HandleServerStreamRequest builds internal streamer via handlers, wraps it to ServerMessageStreamer and returns the result.
-func (s *Service) HandleServerStreamRequest(respWriter util.ResponseMessageWriter) util.ResponseMessageWriter {
- return func(resp util.ResponseMessage) error {
- setMeta(resp, s.cfg)
-
- return respWriter(resp)
- }
-}
diff --git a/pkg/services/util/response/service.go b/pkg/services/util/response/service.go
index 87cc8383dd..5152a8ece8 100644
--- a/pkg/services/util/response/service.go
+++ b/pkg/services/util/response/service.go
@@ -1,54 +1,34 @@
package response
import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
// Service represents universal v2 service
// that sets response meta header values.
type Service struct {
- cfg *cfg
-}
-
-// Option is an option of Service constructor.
-type Option func(*cfg)
-
-type cfg struct {
version refs.Version
state netmap.State
}
-func defaultCfg() *cfg {
- var c cfg
-
- version.Current().WriteToV2(&c.version)
-
- return &c
-}
-
// NewService creates, initializes and returns Service instance.
-func NewService(opts ...Option) *Service {
- c := defaultCfg()
-
- for i := range opts {
- opts[i](c)
- }
-
- return &Service{
- cfg: c,
- }
+func NewService(nmState netmap.State) *Service {
+ s := &Service{state: nmState}
+ version.Current().WriteToV2(&s.version)
+ return s
}
-func setMeta(resp util.ResponseMessage, cfg *cfg) {
+// SetMeta adds a meta header to resp.
+func (s *Service) SetMeta(resp util.ResponseMessage) {
meta := new(session.ResponseMetaHeader)
- meta.SetVersion(&cfg.version)
+ meta.SetVersion(&s.version)
meta.SetTTL(1) // FIXME: #1160 TTL must be calculated
- meta.SetEpoch(cfg.state.CurrentEpoch())
+ meta.SetEpoch(s.state.CurrentEpoch())
if origin := resp.GetMetaHeader(); origin != nil {
// FIXME: #1160 what if origin is set by local server?
@@ -57,10 +37,3 @@ func setMeta(resp util.ResponseMessage, cfg *cfg) {
resp.SetMetaHeader(meta)
}
-
-// WithNetworkState returns option to set network state of Service.
-func WithNetworkState(v netmap.State) Option {
- return func(c *cfg) {
- c.state = v
- }
-}
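With the functional options gone, wiring the response service is a one-liner; a hypothetical call site (nmState being any netmap.State implementation):

respSvc := response.NewService(nmState)

// just before writing a response to the wire:
respSvc.SetMeta(resp) // stamps protocol version, TTL and the current epoch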
diff --git a/pkg/services/util/response/unary.go b/pkg/services/util/response/unary.go
deleted file mode 100644
index 29cb953140..0000000000
--- a/pkg/services/util/response/unary.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package response
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
-)
-
-// HandleUnaryRequest call passes request to handler, sets response meta header values and returns it.
-func (s *Service) HandleUnaryRequest(ctx context.Context, req any, handler util.UnaryHandler) (util.ResponseMessage, error) {
- // process request
- resp, err := handler(ctx, req)
- if err != nil {
- return nil, fmt.Errorf("could not handle request: %w", err)
- }
-
- setMeta(resp, s.cfg)
-
- return resp, nil
-}
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index 2478e6256e..348a45a942 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -1,13 +1,12 @@
package util
import (
- "context"
"crypto/ecdsa"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
@@ -21,203 +20,50 @@ type ResponseMessage interface {
SetMetaHeader(*session.ResponseMetaHeader)
}
-type UnaryHandler func(context.Context, any) (ResponseMessage, error)
-
type SignService struct {
key *ecdsa.PrivateKey
}
-type ResponseMessageWriter func(ResponseMessage) error
-
-type ServerStreamHandler func(context.Context, any) (ResponseMessageReader, error)
-
-type ResponseMessageReader func() (ResponseMessage, error)
-
var ErrAbortStream = errors.New("abort message stream")
-type ResponseConstructor func() ResponseMessage
-
-type RequestMessageWriter func(any) error
-
-type ClientStreamCloser func() (ResponseMessage, error)
-
-type RequestMessageStreamer struct {
- key *ecdsa.PrivateKey
-
- send RequestMessageWriter
-
- close ClientStreamCloser
-
- respCons ResponseConstructor
-
- statusSupported bool
-
- sendErr error
-}
-
func NewUnarySignService(key *ecdsa.PrivateKey) *SignService {
return &SignService{
key: key,
}
}
-func (s *RequestMessageStreamer) Send(req any) error {
- // req argument should be strengthen with type RequestMessage
- s.statusSupported = isStatusSupported(req.(RequestMessage)) // panic is OK here for now
-
- var err error
-
- // verify request signatures
- if err = signature.VerifyServiceMessage(req); err != nil {
- err = fmt.Errorf("could not verify request: %w", err)
- } else {
- err = s.send(req)
+// SignResponse signs resp with the private key via signature.SignServiceMessage.
+// If err is non-nil, it is first converted to an API status and attached to
+// resp, so the failure reaches the client in signed form; any signing error
+// is returned as-is for the transport layer to handle.
+func (s *SignService) SignResponse(resp ResponseMessage, err error) error {
+ if err != nil {
+ setStatusV2(resp, err)
}
+ err = signature.SignServiceMessage(s.key, resp)
if err != nil {
- if !s.statusSupported {
- return err
- }
-
- s.sendErr = err
-
- return ErrAbortStream
+ return fmt.Errorf("could not sign response: %w", err)
}
return nil
}
-func (s *RequestMessageStreamer) CloseAndRecv() (ResponseMessage, error) {
- var (
- resp ResponseMessage
- err error
- )
-
- if s.sendErr != nil {
- err = s.sendErr
- } else {
- resp, err = s.close()
- if err != nil {
- err = fmt.Errorf("could not close stream and receive response: %w", err)
- }
- }
-
- if err != nil {
- if !s.statusSupported {
- return nil, err
- }
-
- resp = s.respCons()
-
- setStatusV2(resp, err)
- }
-
- if err = signResponse(s.key, resp, s.statusSupported); err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-func (s *SignService) CreateRequestStreamer(sender RequestMessageWriter, closer ClientStreamCloser, blankResp ResponseConstructor) *RequestMessageStreamer {
- return &RequestMessageStreamer{
- key: s.key,
- send: sender,
- close: closer,
-
- respCons: blankResp,
- }
-}
-
-func (s *SignService) HandleServerStreamRequest(
- req any,
- respWriter ResponseMessageWriter,
- blankResp ResponseConstructor,
- respWriterCaller func(ResponseMessageWriter) error,
-) error {
- // handle protocol versions <=2.10 (API statuses was introduced in 2.11 only)
-
- // req argument should be strengthen with type RequestMessage
- statusSupported := isStatusSupported(req.(RequestMessage)) // panic is OK here for now
-
- var err error
-
- // verify request signatures
- if err = signature.VerifyServiceMessage(req); err != nil {
- err = fmt.Errorf("could not verify request: %w", err)
- } else {
- err = respWriterCaller(func(resp ResponseMessage) error {
- if err := signResponse(s.key, resp, statusSupported); err != nil {
- return err
- }
-
- return respWriter(resp)
- })
- }
-
- if err != nil {
- if !statusSupported {
- return err
- }
-
- resp := blankResp()
-
- setStatusV2(resp, err)
-
- _ = signResponse(s.key, resp, false) // panics or returns nil with false arg
-
- return respWriter(resp)
- }
-
- return nil
-}
-
-func (s *SignService) HandleUnaryRequest(ctx context.Context, req any, handler UnaryHandler, blankResp ResponseConstructor) (ResponseMessage, error) {
- // handle protocol versions <=2.10 (API statuses was introduced in 2.11 only)
-
- // req argument should be strengthen with type RequestMessage
- statusSupported := isStatusSupported(req.(RequestMessage)) // panic is OK here for now
-
- var (
- resp ResponseMessage
- err error
- )
-
- // verify request signatures
- if err = signature.VerifyServiceMessage(req); err != nil {
- var sigErr apistatus.SignatureVerification
+func (s *SignService) VerifyRequest(req RequestMessage) error {
+ if err := signature.VerifyServiceMessage(req); err != nil {
+ sigErr := new(apistatus.SignatureVerification)
sigErr.SetMessage(err.Error())
-
- err = sigErr
- } else {
- // process request
- resp, err = handler(ctx, req)
+ return sigErr
}
-
- if err != nil {
- if !statusSupported {
- return nil, err
- }
-
- resp = blankResp()
-
- setStatusV2(resp, err)
- }
-
- // sign the response
- if err = signResponse(s.key, resp, statusSupported); err != nil {
- return nil, err
- }
-
- return resp, nil
+ return nil
}
-func isStatusSupported(req RequestMessage) bool {
- version := req.GetMetaHeader().GetVersion()
-
- mjr := version.GetMajor()
-
- return mjr > 2 || mjr == 2 && version.GetMinor() >= 11
+// EnsureNonNilResponse creates an appropriate response struct if it is nil.
+func EnsureNonNilResponse[T any](resp *T, err error) (*T, error) {
+ if resp != nil {
+ return resp, err
+ }
+ return new(T), err
}
func setStatusV2(resp ResponseMessage, err error) {
@@ -228,22 +74,3 @@ func setStatusV2(resp ResponseMessage, err error) {
session.SetStatus(resp, apistatus.ToStatusV2(apistatus.ErrToStatus(err)))
}
-
-// signs response with private key via signature.SignServiceMessage.
-// The signature error affects the result depending on the protocol version:
-// - if status return is supported, panics since we cannot return the failed status, because it will not be signed;
-// - otherwise, returns error in order to transport it directly.
-func signResponse(key *ecdsa.PrivateKey, resp any, statusSupported bool) error {
- err := signature.SignServiceMessage(key, resp)
- if err != nil {
- err = fmt.Errorf("could not sign response: %w", err)
-
- if statusSupported {
- // We can't pass this error as status code since response will be unsigned.
- // Isn't expected in practice, so panic is ok here.
- panic(err)
- }
- }
-
- return err
-}
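A hypothetical handler shows how the slimmed-down SignService composes with EnsureNonNilResponse (server, SomeRequest, SomeResponse and s.handler are assumed names; the request and response types must satisfy util.RequestMessage and util.ResponseMessage):

func (s *server) someCall(ctx context.Context, req *SomeRequest) (*SomeResponse, error) {
	if err := s.sigSvc.VerifyRequest(req); err != nil {
		resp := new(SomeResponse)
		return resp, s.sigSvc.SignResponse(resp, err)
	}
	// never sign a nil response: substitute an empty one carrying the status
	resp, err := util.EnsureNonNilResponse(s.handler(ctx, req))
	return resp, s.sigSvc.SignResponse(resp, err)
}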
diff --git a/pkg/tracing/trace.go b/pkg/tracing/trace.go
new file mode 100644
index 0000000000..dc2f90259f
--- /dev/null
+++ b/pkg/tracing/trace.go
@@ -0,0 +1,19 @@
+package tracing
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+var emptyTraceID = [16]byte{}
+
+// GetTraceID retrieves the trace ID from the provided context.
+// It returns an empty string if no trace ID is found.
+func GetTraceID(ctx context.Context) string {
+ span := trace.SpanFromContext(ctx)
+ if span == nil || span.SpanContext().TraceID() == emptyTraceID {
+ return ""
+ }
+ return span.SpanContext().TraceID().String()
+}
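Illustrative use, e.g. tagging a log entry with the active trace (zap assumed, as elsewhere in this diff):

if id := tracing.GetTraceID(ctx); id != "" {
	log.Info(ctx, "processing request", zap.String("trace_id", id))
}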
diff --git a/pkg/util/ape/converter.go b/pkg/util/ape/converter.go
new file mode 100644
index 0000000000..c706cf0526
--- /dev/null
+++ b/pkg/util/ape/converter.go
@@ -0,0 +1,280 @@
+package ape
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ v2acl "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+)
+
+type ConvertEACLError struct {
+ nested error
+}
+
+func (e *ConvertEACLError) Error() string {
+ if e == nil {
+ return ""
+ }
+ return "failed to convert eACL table to policy engine chain: " + e.nested.Error()
+}
+
+func (e *ConvertEACLError) Unwrap() error {
+ if e == nil {
+ return nil
+ }
+ return e.nested
+}
+
+// ConvertEACLToAPE converts eacl.Table to apechain.Chain.
+func ConvertEACLToAPE(eaclTable *eacl.Table) (*apechain.Chain, error) {
+ if eaclTable == nil {
+ return nil, nil
+ }
+ res := &apechain.Chain{
+ MatchType: apechain.MatchTypeFirstMatch,
+ }
+
+ resource := getResource(eaclTable)
+
+ for _, eaclRecord := range eaclTable.Records() {
+ if len(eaclRecord.Targets()) == 0 {
+ // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101
+ // and https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L36
+ // such record doesn't have any effect
+ continue
+ }
+
+ st, err := actionToStatus(eaclRecord.Action())
+ if err != nil {
+ return nil, err
+ }
+ act, err := operationToAction(eaclRecord.Operation())
+ if err != nil {
+ return nil, err
+ }
+
+ if len(eaclRecord.Filters()) == 0 {
+ res.Rules = appendTargetsOnly(res.Rules, st, act, resource, eaclRecord.Targets())
+ } else {
+ res.Rules, err = appendTargetsAndFilters(res.Rules, st, act, resource, eaclRecord.Targets(), eaclRecord.Filters())
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return res, nil
+}
+
+func apeRoleConds(role eacl.Role) (res []apechain.Condition) {
+ switch role {
+ case eacl.RoleSystem:
+ res = append(res,
+ apechain.Condition{
+ Op: apechain.CondStringEquals,
+ Kind: apechain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleContainer,
+ },
+ )
+ res = append(res,
+ apechain.Condition{
+ Op: apechain.CondStringEquals,
+ Kind: apechain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleIR,
+ },
+ )
+ case eacl.RoleOthers:
+ res = append(res,
+ apechain.Condition{
+ Op: apechain.CondStringEquals,
+ Kind: apechain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOthers,
+ },
+ )
+ case eacl.RoleUser:
+ res = append(res,
+ apechain.Condition{
+ Op: apechain.CondStringEquals,
+ Kind: apechain.KindRequest,
+ Key: nativeschema.PropertyKeyActorRole,
+ Value: nativeschema.PropertyValueContainerRoleOwner,
+ },
+ )
+ case eacl.RoleUnknown:
+ // such condition has no effect
+ default:
+ }
+ return
+}
+
+func appendTargetsOnly(source []apechain.Rule, st apechain.Status, act apechain.Actions, res apechain.Resources, targets []eacl.Target) []apechain.Rule {
+ // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101
+ // role OR public key must be equal
+ rule := apechain.Rule{
+ Status: st,
+ Actions: act,
+ Resources: res,
+ Any: true,
+ }
+ for _, target := range targets {
+ rule.Condition = append(rule.Condition, apeRoleConds(target.Role())...)
+ for _, binKey := range target.BinaryKeys() {
+ var pubKeyCondition apechain.Condition
+ pubKeyCondition.Kind = apechain.KindRequest
+ pubKeyCondition.Key = nativeschema.PropertyKeyActorPublicKey
+ pubKeyCondition.Value = hex.EncodeToString(binKey)
+ pubKeyCondition.Op = apechain.CondStringEquals
+ rule.Condition = append(rule.Condition, pubKeyCondition)
+ }
+ }
+ return append(source, rule)
+}
+
+func appendTargetsAndFilters(source []apechain.Rule, st apechain.Status, act apechain.Actions, res apechain.Resources,
+ targets []eacl.Target, filters []eacl.Filter,
+) ([]apechain.Rule, error) {
+ // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101
+ // role OR public key must be equal
+ // so filters are repeated for each role and public key
+ var err error
+ for _, target := range targets {
+ rule := apechain.Rule{
+ Status: st,
+ Actions: act,
+ Resources: res,
+ }
+ rule.Condition = append(rule.Condition, apeRoleConds(target.Role())...)
+ rule.Condition, err = appendFilters(rule.Condition, filters)
+ if err != nil {
+ return nil, err
+ }
+
+ source = append(source, rule)
+
+ for _, binKey := range target.BinaryKeys() {
+ rule := apechain.Rule{
+ Status: st,
+ Actions: act,
+ Resources: res,
+ }
+ var pubKeyCondition apechain.Condition
+ pubKeyCondition.Kind = apechain.KindRequest
+ pubKeyCondition.Key = nativeschema.PropertyKeyActorPublicKey
+ pubKeyCondition.Value = hex.EncodeToString(binKey)
+ pubKeyCondition.Op = apechain.CondStringEquals
+
+ rule.Condition = append(rule.Condition, pubKeyCondition)
+ rule.Condition, err = appendFilters(rule.Condition, filters)
+ if err != nil {
+ return nil, err
+ }
+
+ source = append(source, rule)
+ }
+ }
+
+ return source, nil
+}
+
+func appendFilters(source []apechain.Condition, filters []eacl.Filter) ([]apechain.Condition, error) {
+ for _, filter := range filters {
+ var cond apechain.Condition
+ var isObject bool
+ if filter.From() == eacl.HeaderFromObject {
+ cond.Kind = apechain.KindResource
+ isObject = true
+ } else if filter.From() == eacl.HeaderFromRequest {
+ cond.Kind = apechain.KindRequest
+ } else {
+ return nil, &ConvertEACLError{nested: fmt.Errorf("unknown filter from: %d", filter.From())}
+ }
+
+ if filter.Matcher() == eacl.MatchStringEqual {
+ cond.Op = apechain.CondStringEquals
+ } else if filter.Matcher() == eacl.MatchStringNotEqual {
+ cond.Op = apechain.CondStringNotEquals
+ } else {
+ return nil, &ConvertEACLError{nested: fmt.Errorf("unknown filter matcher: %d", filter.Matcher())}
+ }
+
+ cond.Key = eaclKeyToAPEKey(filter.Key(), isObject)
+ cond.Value = filter.Value()
+
+ source = append(source, cond)
+ }
+ return source, nil
+}
+
+func eaclKeyToAPEKey(key string, isObject bool) string {
+ if !isObject {
+ return key
+ }
+ switch key {
+ default:
+ return key
+ case v2acl.FilterObjectVersion:
+ return nativeschema.PropertyKeyObjectVersion
+ case v2acl.FilterObjectID:
+ return nativeschema.PropertyKeyObjectID
+ case v2acl.FilterObjectContainerID:
+ return nativeschema.PropertyKeyObjectContainerID
+ case v2acl.FilterObjectOwnerID:
+ return nativeschema.PropertyKeyObjectOwnerID
+ case v2acl.FilterObjectCreationEpoch:
+ return nativeschema.PropertyKeyObjectCreationEpoch
+ case v2acl.FilterObjectPayloadLength:
+ return nativeschema.PropertyKeyObjectPayloadLength
+ case v2acl.FilterObjectPayloadHash:
+ return nativeschema.PropertyKeyObjectPayloadHash
+ case v2acl.FilterObjectType:
+ return nativeschema.PropertyKeyObjectType
+ case v2acl.FilterObjectHomomorphicHash:
+ return nativeschema.PropertyKeyObjectHomomorphicHash
+ }
+}
+
+func getResource(eaclTable *eacl.Table) apechain.Resources {
+ cnrID, isSet := eaclTable.CID()
+ if isSet {
+ return apechain.Resources{
+ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
+ }
+ }
+ return apechain.Resources{
+ Names: []string{nativeschema.ResourceFormatRootObjects},
+ }
+}
+
+func actionToStatus(a eacl.Action) (apechain.Status, error) {
+ switch a {
+ case eacl.ActionAllow:
+ return apechain.Allow, nil
+ case eacl.ActionDeny:
+ return apechain.AccessDenied, nil
+ default:
+ return apechain.NoRuleFound, &ConvertEACLError{nested: fmt.Errorf("unknown action: %d", a)}
+ }
+}
+
+var eaclOperationToEngineAction = map[eacl.Operation]apechain.Actions{
+ eacl.OperationGet: {Names: []string{nativeschema.MethodGetObject}},
+ eacl.OperationHead: {Names: []string{nativeschema.MethodHeadObject}},
+ eacl.OperationPut: {Names: []string{nativeschema.MethodPutObject}},
+ eacl.OperationDelete: {Names: []string{nativeschema.MethodDeleteObject}},
+ eacl.OperationSearch: {Names: []string{nativeschema.MethodSearchObject}},
+ eacl.OperationRange: {Names: []string{nativeschema.MethodRangeObject}},
+ eacl.OperationRangeHash: {Names: []string{nativeschema.MethodHashObject}},
+}
+
+func operationToAction(op eacl.Operation) (apechain.Actions, error) {
+ if v, ok := eaclOperationToEngineAction[op]; ok {
+ return v, nil
+ }
+ return apechain.Actions{}, &ConvertEACLError{nested: fmt.Errorf("unknown operation: %d", op)}
+}
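To make the translation concrete: for a deny-delete record targeting RoleOthers plus one binary key, appendTargetsOnly produces a single rule whose conditions are OR-ed via Any. A sketch of the resulting value (the hex key is a placeholder):

rule := apechain.Rule{
	Status:    apechain.AccessDenied, // from eacl.ActionDeny
	Actions:   apechain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
	Resources: apechain.Resources{Names: []string{"<container-scoped object resource>"}},
	Any:       true, // role match OR public-key match triggers the rule
	Condition: []apechain.Condition{
		{
			Op:    apechain.CondStringEquals,
			Kind:  apechain.KindRequest,
			Key:   nativeschema.PropertyKeyActorRole,
			Value: nativeschema.PropertyValueContainerRoleOthers,
		},
		{
			Op:    apechain.CondStringEquals,
			Kind:  apechain.KindRequest,
			Key:   nativeschema.PropertyKeyActorPublicKey,
			Value: "<hex-encoded public key>",
		},
	},
}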
diff --git a/pkg/util/ape/converter_test.go b/pkg/util/ape/converter_test.go
new file mode 100644
index 0000000000..28125606cf
--- /dev/null
+++ b/pkg/util/ape/converter_test.go
@@ -0,0 +1,471 @@
+package ape
+
+import (
+ "encoding/hex"
+ "fmt"
+ "testing"
+
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEACLTableWithoutRecords(t *testing.T) {
+ t.Parallel()
+
+ tb := eacl.NewTable()
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ req := &testRequest{
+ res: &testResource{name: nativeschema.ResourceFormatRootObjects},
+ }
+
+ compare(t, vu, ch, req)
+
+ cnrID := cidtest.ID()
+ tb.SetCID(cnrID)
+ vu.WithContainerID(&cnrID)
+ req.res.name = fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())
+
+ ch, err = ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ compare(t, vu, ch, req)
+}
+
+func TestNoTargets(t *testing.T) {
+ t.Parallel()
+ for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+ vu.WithRole(eacl.RoleOthers)
+
+		// allow/deny delete without role or key specified
+ record := eacl.NewRecord()
+ record.SetAction(act)
+ record.SetOperation(eacl.OperationDelete)
+ record.AddObjectContainerIDFilter(eacl.MatchStringEqual, cnrID)
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorRole: eacl.RoleOthers.String(),
+ },
+ res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
+ }
+ compare(t, vu, ch, req)
+ }
+}
+
+func TestNoFilters(t *testing.T) {
+ t.Parallel()
+
+ t.Run("target match by role only", func(t *testing.T) {
+ t.Parallel()
+
+ for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+ vu.WithRole(eacl.RoleOthers)
+
+ // allow/deny for OTHERS
+ record := eacl.NewRecord()
+ record.SetAction(act)
+ record.SetOperation(eacl.OperationDelete)
+
+ target := eacl.NewTarget()
+ target.SetRole(eacl.RoleOthers)
+ record.SetTargets(*target)
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
+ },
+ res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
+ }
+ compare(t, vu, ch, req)
+ }
+ })
+
+ t.Run("target match by role and public key", func(t *testing.T) {
+ t.Parallel()
+
+ for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+ vu.WithRole(eacl.RoleOthers)
+
+ // allow/deny for OTHERS
+ record := eacl.NewRecord()
+ record.SetAction(act)
+ record.SetOperation(eacl.OperationDelete)
+
+ p1, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ p2, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ vu.WithSenderKey(p2.PublicKey().Bytes())
+
+ target := eacl.NewTarget()
+ target.SetRole(eacl.RoleOthers)
+ target.SetBinaryKeys([][]byte{p1.PublicKey().Bytes(), p2.PublicKey().Bytes()})
+ record.SetTargets(*target)
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
+ nativeschema.PropertyKeyActorPublicKey: string(p2.PublicKey().Bytes()),
+ },
+ res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
+ }
+ compare(t, vu, ch, req)
+ }
+ })
+
+ t.Run("target match by public key only", func(t *testing.T) {
+ t.Parallel()
+
+ for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+
+ // allow/deny for OTHERS
+ record := eacl.NewRecord()
+ record.SetAction(act)
+ record.SetOperation(eacl.OperationDelete)
+
+ p1, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ p2, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ vu.WithSenderKey(p2.PublicKey().Bytes())
+
+ target := eacl.NewTarget()
+ target.SetRole(eacl.RoleOthers)
+ target.SetBinaryKeys([][]byte{p1.PublicKey().Bytes(), p2.PublicKey().Bytes()})
+ record.SetTargets(*target)
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(p2.PublicKey().Bytes()),
+ },
+ res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
+ }
+ compare(t, vu, ch, req)
+ }
+ })
+
+ t.Run("target doesn't match", func(t *testing.T) {
+ t.Parallel()
+
+ for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+ vu.WithRole(eacl.RoleSystem)
+
+ // allow/deny for OTHERS
+ record := eacl.NewRecord()
+ record.SetAction(act)
+ record.SetOperation(eacl.OperationDelete)
+
+ target := eacl.NewTarget()
+ target.SetRole(eacl.RoleOthers)
+ record.SetTargets(*target)
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorRole: eacl.RoleSystem.String(),
+ },
+ res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
+ }
+ compare(t, vu, ch, req)
+ }
+ })
+}
+
+func TestWithFilters(t *testing.T) {
+ t.Parallel()
+
+ t.Run("object attributes", func(t *testing.T) {
+ t.Parallel()
+
+ const attrKey = "attribute_1"
+ const attrValue = "attribute_1_value"
+
+ for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+ vu.WithRole(eacl.RoleOthers)
+ vu.WithHeaderSource(&testHeaderSource{
+ headers: map[eacl.FilterHeaderType][]eacl.Header{
+ eacl.HeaderFromObject: {&testHeader{key: attrKey, value: attrValue}},
+ },
+ })
+
+ // allow/deny for OTHERS
+ record := eacl.NewRecord()
+ record.SetAction(act)
+ record.SetOperation(eacl.OperationDelete)
+
+ target := eacl.NewTarget()
+ target.SetRole(eacl.RoleOthers)
+ record.SetTargets(*target)
+
+ record.AddObjectAttributeFilter(eacl.MatchStringEqual, attrKey, attrValue)
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
+ },
+ res: &testResource{
+ name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
+ props: map[string]string{
+ attrKey: attrValue,
+ },
+ },
+ }
+ compare(t, vu, ch, req)
+ }
+ })
+
+ t.Run("request attributes", func(t *testing.T) {
+ t.Parallel()
+
+ const attrKey = "attribute_1"
+ const attrValue = "attribute_1_value"
+
+ for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+ vu.WithRole(eacl.RoleOthers)
+ vu.WithHeaderSource(&testHeaderSource{
+ headers: map[eacl.FilterHeaderType][]eacl.Header{
+ eacl.HeaderFromRequest: {&testHeader{key: attrKey, value: attrValue}},
+ },
+ })
+
+ // allow/deny for OTHERS
+ record := eacl.NewRecord()
+ record.SetAction(act)
+ record.SetOperation(eacl.OperationDelete)
+
+ target := eacl.NewTarget()
+ target.SetRole(eacl.RoleOthers)
+ record.SetTargets(*target)
+
+ record.AddFilter(eacl.HeaderFromRequest, eacl.MatchStringEqual, attrKey, attrValue)
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
+ attrKey: attrValue,
+ },
+ res: &testResource{
+ name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
+ },
+ }
+ compare(t, vu, ch, req)
+ }
+ })
+}
+
+func TestNoHeader(t *testing.T) {
+ t.Skip("Should pass after https://git.frostfs.info/TrueCloudLab/policy-engine/issues/8#issuecomment-26126")
+
+ t.Parallel()
+
+ const attrKey = "attribute_1"
+ cnrID := cidtest.ID()
+ tb := eacl.NewTable()
+ tb.SetCID(cnrID)
+
+ vu := &eacl.ValidationUnit{}
+ vu.WithEACLTable(tb)
+ vu.WithContainerID(&cnrID)
+ vu.WithRole(eacl.RoleOthers)
+ vu.WithHeaderSource(&testHeaderSource{
+ headers: map[eacl.FilterHeaderType][]eacl.Header{
+ eacl.HeaderFromRequest: {},
+ },
+ })
+
+ // deny for OTHERS
+ record := eacl.NewRecord()
+ record.SetAction(eacl.ActionDeny)
+ record.SetOperation(eacl.OperationDelete)
+
+ target := eacl.NewTarget()
+ target.SetRole(eacl.RoleOthers)
+ record.SetTargets(*target)
+
+ record.AddFilter(eacl.HeaderFromRequest, eacl.MatchStringEqual, attrKey, "")
+
+ tb.AddRecord(record)
+
+ ch, err := ConvertEACLToAPE(tb)
+ require.NoError(t, err)
+
+ req := &testRequest{
+ props: map[string]string{
+ nativeschema.PropertyKeyActorRole: eacl.RoleOthers.String(),
+ },
+ res: &testResource{
+ name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
+ },
+ }
+ compare(t, vu, ch, req)
+}
+
+func compare(t *testing.T, vu *eacl.ValidationUnit, ch *apechain.Chain, req *testRequest) {
+ validator := eacl.NewValidator()
+ for eaclOp, apeOp := range eaclOperationToEngineAction {
+ vu.WithOperation(eaclOp)
+ req.op = apeOp.Names[0]
+
+ eaclAct, recordFound := validator.CalculateAction(vu)
+ apeSt, ruleFound := ch.Match(req)
+
+ require.Equal(t, recordFound, ruleFound)
+ require.NotEqual(t, eacl.ActionUnknown, eaclAct)
+ if eaclAct == eacl.ActionAllow {
+ if recordFound {
+ require.Equal(t, apechain.Allow, apeSt)
+ } else {
+ require.Equal(t, apechain.NoRuleFound, apeSt)
+ }
+ } else {
+ require.Equal(t, apechain.AccessDenied, apeSt)
+ }
+ }
+}
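+
+// For illustration, compare asserts the following equivalence for every
+// operation: an eACL record match must coincide with an APE rule match; a
+// matched allow maps to apechain.Allow, an unmatched request maps to
+// apechain.NoRuleFound, and a deny maps to apechain.AccessDenied.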
+
+type testRequest struct {
+ op string
+ props map[string]string
+ res *testResource
+}
+
+func (r *testRequest) Operation() string {
+ return r.op
+}
+
+func (r *testRequest) Property(key string) string {
+ if v, ok := r.props[key]; ok {
+ return v
+ }
+ return ""
+}
+
+func (r *testRequest) Resource() resource.Resource {
+ return r.res
+}
+
+type testResource struct {
+ name string
+ props map[string]string
+}
+
+func (r *testResource) Name() string {
+ return r.name
+}
+
+func (r *testResource) Property(key string) string {
+ if v, ok := r.props[key]; ok {
+ return v
+ }
+ return ""
+}
+
+type testHeaderSource struct {
+ headers map[eacl.FilterHeaderType][]eacl.Header
+}
+
+func (s *testHeaderSource) HeadersOfType(t eacl.FilterHeaderType) ([]eacl.Header, bool) {
+ v, ok := s.headers[t]
+ return v, ok
+}
+
+type testHeader struct {
+ key, value string
+}
+
+func (h *testHeader) Key() string { return h.key }
+func (h *testHeader) Value() string { return h.value }
diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go
new file mode 100644
index 0000000000..6f114d45b9
--- /dev/null
+++ b/pkg/util/ape/parser.go
@@ -0,0 +1,321 @@
+package ape
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/flynn-archive/go-shlex"
+)
+
+var (
+ errInvalidStatementFormat = errors.New("invalid statement format")
+ errInvalidConditionFormat = errors.New("invalid condition format")
+ errUnknownStatus = errors.New("status is not recognized")
+ errUnknownStatusDetail = errors.New("status detail is not recognized")
+ errUnknownAction = errors.New("action is not recognized")
+ errUnknownBinaryOperator = errors.New("binary operator is not recognized")
+ errUnknownCondObjectType = errors.New("condition object type is not recognized")
+ errMixedTypesInRule = errors.New("found mixed type of actions in rule")
+ errNoActionsInRule = errors.New("there are no actions in rule")
+ errUnsupportedResourceFormat = errors.New("unsupported resource format")
+ errFailedToParseAllAny = errors.New("any/all is not parsed")
+)
+
+func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("read file <%s>: %w", path, err)
+ }
+
+ err = chain.UnmarshalBinary(data)
+ if err != nil {
+ err = chain.UnmarshalJSON(data)
+ if err != nil {
+ return fmt.Errorf("invalid format: %w", err)
+ }
+ }
+
+ return nil
+}
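+
+// For illustration, a typical call might look like this (the path is a
+// hypothetical example):
+//
+//	var chain apechain.Chain
+//	if err := ParseAPEChainBinaryOrJSON(&chain, "policy/chain.json"); err != nil {
+//		// the file contains neither a valid binary nor a valid JSON chain
+//	}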
+
+// ParseAPEChain parses APE chain rules.
+func ParseAPEChain(chain *apechain.Chain, rules []string) error {
+ if len(rules) == 0 {
+ return errors.New("no APE rules provided")
+ }
+
+ for _, rule := range rules {
+ r := new(apechain.Rule)
+ if err := ParseAPERule(r, rule); err != nil {
+ return err
+ }
+ chain.Rules = append(chain.Rules, *r)
+ }
+
+ return nil
+}
+
+// ParseAPERule parses an access-policy-engine statement of the following form:
+// <status>[:status_detail] <action>... [<condition>...] <resource>...
+//
+// Examples:
+// deny Object.Put *
+// deny:QuotaLimitReached Object.Put *
+// allow Object.Put *
+// allow Object.Get ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
+// allow Object.Get any ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
+// allow Object.Get all ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
+// allow Object.* *
+// allow Container.* *
+//
+//nolint:godot
+func ParseAPERule(r *apechain.Rule, rule string) error {
+ lexemes, err := shlex.Split(rule)
+ if err != nil {
+ return fmt.Errorf("can't parse rule '%s': %v", rule, err)
+ }
+ return parseRuleLexemes(r, lexemes)
+}
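+
+// For illustration, a sketch of parsing one rule (values are hypothetical):
+//
+//	var r apechain.Rule
+//	err := ParseAPERule(&r, "allow Object.Get RequestCondition:Actor=ownerA *")
+//	// on success: r.Status is apechain.Allow, r.Actions.Names holds
+//	// MethodGetObject, r.Condition holds one KindRequest equality condition,
+//	// and r.Resources.Names holds the all-objects resource.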
+
+func unique(inputSlice []string) []string {
+ uniqueSlice := make([]string, 0, len(inputSlice))
+ seen := make(map[string]bool, len(inputSlice))
+ for _, element := range inputSlice {
+ if !seen[element] {
+ uniqueSlice = append(uniqueSlice, element)
+ seen[element] = true
+ }
+ }
+ return uniqueSlice
+}
+
+func parseRuleLexemes(r *apechain.Rule, lexemes []string) error {
+ if len(lexemes) < 2 {
+ return errInvalidStatementFormat
+ }
+
+ var err error
+ r.Status, err = parseStatus(lexemes[0])
+ if err != nil {
+ return err
+ }
+
+ var objectTargeted bool
+ var containerTargeted bool
+
+ for i, lexeme := range lexemes[1:] {
+ anyExpr, anyErr := parseAnyAll(lexeme)
+ if anyErr == nil {
+ r.Any = anyExpr
+ continue
+ }
+
+ var names []string
+ var actionType bool
+ names, actionType, err = parseAction(lexeme)
+ if err != nil {
+ condition, errCond := parseCondition(lexeme)
+ if errCond != nil {
+ err = fmt.Errorf("%w:%w", err, errCond)
+ lexemes = lexemes[i+1:]
+ break
+ }
+ r.Condition = append(r.Condition, *condition)
+ } else {
+ if actionType {
+ objectTargeted = true
+ } else {
+ containerTargeted = true
+ }
+ if objectTargeted && containerTargeted {
+ // Actually, an APE chain allows rules to be defined for several
+ // resources at once (for example, when the chain target is a
+ // namespace), but this parser primitively compiles verbs, conditions
+ // and resources into a single rule. So, for the parser, one rule
+ // relates to exactly one resource type: object or container.
+ return errMixedTypesInRule
+ }
+
+ r.Actions.Names = append(r.Actions.Names, names...)
+ }
+ }
+ r.Actions.Names = unique(r.Actions.Names)
+ if len(r.Actions.Names) == 0 {
+ return fmt.Errorf("%w:%w", err, errNoActionsInRule)
+ }
+ for _, lexeme := range lexemes {
+ resource, errRes := parseResource(lexeme, objectTargeted)
+ if errRes != nil {
+ return fmt.Errorf("%w:%w", err, errRes)
+ }
+ r.Resources.Names = append(r.Resources.Names, resource)
+ }
+
+ return nil
+}
+
+func parseAnyAll(lexeme string) (bool, error) {
+ switch strings.ToLower(lexeme) {
+ case "any":
+ return true, nil
+ case "all":
+ return false, nil
+ default:
+ return false, errFailedToParseAllAny
+ }
+}
+
+func parseStatus(lexeme string) (apechain.Status, error) {
+ action, expression, found := strings.Cut(lexeme, ":")
+ switch strings.ToLower(action) {
+ case "deny":
+ if !found {
+ return apechain.AccessDenied, nil
+ }
+ if strings.EqualFold(expression, "QuotaLimitReached") {
+ return apechain.QuotaLimitReached, nil
+ }
+ return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
+ case "allow":
+ if found {
+ return 0, errUnknownStatusDetail
+ }
+ return apechain.Allow, nil
+ default:
+ return 0, errUnknownStatus
+ }
+}
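+
+// For illustration: "allow" yields apechain.Allow, "deny" yields
+// apechain.AccessDenied, and "deny:QuotaLimitReached" yields
+// apechain.QuotaLimitReached; any other status or status detail is rejected.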
+
+func parseAction(lexeme string) ([]string, bool, error) {
+ switch strings.ToLower(lexeme) {
+ case "object.put":
+ return []string{nativeschema.MethodPutObject}, true, nil
+ case "object.get":
+ return []string{nativeschema.MethodGetObject}, true, nil
+ case "object.head":
+ return []string{nativeschema.MethodHeadObject}, true, nil
+ case "object.delete":
+ return []string{nativeschema.MethodDeleteObject}, true, nil
+ case "object.search":
+ return []string{nativeschema.MethodSearchObject}, true, nil
+ case "object.range":
+ return []string{nativeschema.MethodRangeObject}, true, nil
+ case "object.hash":
+ return []string{nativeschema.MethodHashObject}, true, nil
+ case "object.patch":
+ return []string{nativeschema.MethodPatchObject}, true, nil
+ case "object.*":
+ return []string{
+ nativeschema.MethodPutObject,
+ nativeschema.MethodGetObject,
+ nativeschema.MethodHeadObject,
+ nativeschema.MethodDeleteObject,
+ nativeschema.MethodSearchObject,
+ nativeschema.MethodRangeObject,
+ nativeschema.MethodHashObject,
+ nativeschema.MethodPatchObject,
+ }, true, nil
+ case "container.put":
+ return []string{nativeschema.MethodPutContainer}, false, nil
+ case "container.delete":
+ return []string{nativeschema.MethodDeleteContainer}, false, nil
+ case "container.get":
+ return []string{nativeschema.MethodGetContainer}, false, nil
+ case "container.list":
+ return []string{nativeschema.MethodListContainers}, false, nil
+ case "container.*":
+ return []string{
+ nativeschema.MethodPutContainer,
+ nativeschema.MethodDeleteContainer,
+ nativeschema.MethodGetContainer,
+ nativeschema.MethodListContainers,
+ }, false, nil
+ default:
+ return nil, false, fmt.Errorf("%w: %s", errUnknownAction, lexeme)
+ }
+}
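+
+// For illustration: "Object.Put" maps to a single method name, while the
+// wildcard "Object.*" expands to every object method listed above; the
+// boolean result reports whether the verb targets objects (true) or
+// containers (false).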
+
+func parseResource(lexeme string, isObj bool) (string, error) {
+ if len(lexeme) > 0 && !strings.HasSuffix(lexeme, "/") {
+ if isObj {
+ if lexeme == "*" {
+ return nativeschema.ResourceFormatAllObjects, nil
+ } else if lexeme == "/*" || lexeme == "root/*" {
+ return nativeschema.ResourceFormatRootObjects, nil
+ } else if strings.HasPrefix(lexeme, "/") {
+ lexeme = lexeme[1:]
+ delimCount := strings.Count(lexeme, "/")
+ if delimCount == 1 && len(lexeme) >= 3 { // container/object
+ return nativeschema.ObjectPrefix + "//" + lexeme, nil
+ }
+ } else {
+ delimCount := strings.Count(lexeme, "/")
+ if delimCount == 1 && len(lexeme) >= 3 ||
+ delimCount == 2 && len(lexeme) >= 5 { // namespace/container/object
+ return nativeschema.ObjectPrefix + "/" + lexeme, nil
+ }
+ }
+ } else {
+ if lexeme == "*" {
+ return nativeschema.ResourceFormatAllContainers, nil
+ } else if lexeme == "/*" || lexeme == "root/*" {
+ return nativeschema.ResourceFormatRootContainers, nil
+ } else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 {
+ lexeme = lexeme[1:]
+ delimCount := strings.Count(lexeme, "/")
+ if delimCount == 0 {
+ return nativeschema.ContainerPrefix + "//" + lexeme, nil
+ }
+ } else {
+ delimCount := strings.Count(lexeme, "/")
+ if delimCount == 1 && len(lexeme) > 3 { // namespace/container
+ return nativeschema.ContainerPrefix + "/" + lexeme, nil
+ }
+ }
+ }
+ }
+ return "", errUnsupportedResourceFormat
+}
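+
+// For illustration (object rules): "*" yields the all-objects resource,
+// "/*" and "root/*" yield the root-namespace objects resource, "/cid/*"
+// yields ObjectPrefix+"//cid/*" (container "cid" in the root namespace),
+// and "ns/cid/*" yields ObjectPrefix+"/ns/cid/*".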
+
+const (
+ ResourceCondition = "resourcecondition"
+ RequestCondition = "requestcondition"
+)
+
+var typeToCondKindType = map[string]apechain.ConditionKindType{
+ ResourceCondition: apechain.KindResource,
+ RequestCondition: apechain.KindRequest,
+}
+
+func parseCondition(lexeme string) (*apechain.Condition, error) {
+ typ, expression, found := strings.Cut(lexeme, ":")
+ typ = strings.ToLower(typ)
+
+ condKindType, ok := typeToCondKindType[typ]
+ if ok {
+ if !found {
+ return nil, fmt.Errorf("%w: %s", errInvalidConditionFormat, lexeme)
+ }
+
+ var cond apechain.Condition
+ cond.Kind = condKindType
+
+ lhs, rhs, binExpFound := strings.Cut(expression, "!=")
+ if !binExpFound {
+ lhs, rhs, binExpFound = strings.Cut(expression, "=")
+ if !binExpFound {
+ return nil, fmt.Errorf("%w: %s", errUnknownBinaryOperator, expression)
+ }
+ cond.Op = apechain.CondStringEquals
+ } else {
+ cond.Op = apechain.CondStringNotEquals
+ }
+
+ cond.Key, cond.Value = lhs, rhs
+ return &cond, nil
+ }
+ return nil, fmt.Errorf("%w: %s", errUnknownCondObjectType, typ)
+}
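+
+// For illustration: "ResourceCondition:Department=HR" yields a KindResource
+// condition with Op CondStringEquals, Key "Department" and Value "HR", while
+// "RequestCondition:Actor!=ownerA" yields a KindRequest condition with
+// Op CondStringNotEquals.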
diff --git a/pkg/util/ape/parser_test.go b/pkg/util/ape/parser_test.go
new file mode 100644
index 0000000000..c236c46037
--- /dev/null
+++ b/pkg/util/ape/parser_test.go
@@ -0,0 +1,328 @@
+package ape
+
+import (
+ "fmt"
+ "testing"
+
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseAPERule(t *testing.T) {
+ tests := [...]struct {
+ name string
+ rule string
+ expectErr error
+ expectRule policyengine.Rule
+ }{
+ {
+ name: "Valid allow rule for all objects",
+ rule: "allow Object.Put *",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
+ },
+ },
+ {
+ name: "Valid rule for all objects in implicit root namespace",
+ rule: "allow Object.Put /*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
+ },
+ },
+ {
+ name: "Valid rule for all objects in explicit root namespace",
+ rule: "allow Object.Put root/*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
+ },
+ },
+ {
+ name: "Valid rule for all containers in explicit root namespace",
+ rule: "allow Container.Put root/*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}},
+ },
+ },
+ {
+ name: "Valid rule for all objects in root namespace and container",
+ rule: "allow Object.Put /cid/*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, "cid"),
+ }},
+ },
+ },
+ {
+ name: "Valid rule for object in root namespace and container",
+ rule: "allow Object.Put /cid/oid",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, "cid", "oid"),
+ }},
+ },
+ },
+ {
+ name: "Valid rule for all objects in namespace",
+ rule: "allow Object.Put ns/*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceObjects, "ns"),
+ }},
+ },
+ },
+ {
+ name: "Valid rule for all objects in namespace and container",
+ rule: "allow Object.Put ns/cid/*",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, "ns", "cid"),
+ }},
+ },
+ },
+ {
+ name: "Valid rule for object in namespace and container",
+ rule: "allow Object.Put ns/cid/oid",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{
+ fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObject, "ns", "cid", "oid"),
+ }},
+ },
+ },
+ {
+ name: "Valid deny rule",
+ rule: "deny Object.Put *",
+ expectRule: policyengine.Rule{
+ Status: policyengine.AccessDenied,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
+ },
+ },
+ {
+ name: "Valid deny rule with action detail",
+ rule: "deny:QuotaLimitReached Object.Put *",
+ expectRule: policyengine.Rule{
+ Status: policyengine.QuotaLimitReached,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
+ },
+ },
+ {
+ name: "Valid allow rule with conditions",
+ rule: "allow Object.Get ResourceCondition:Department=HR RequestCondition:Actor!=ownerA *",
+ expectRule: policyengine.Rule{
+ Status: policyengine.Allow,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
+ Condition: []policyengine.Condition{
+ {
+ Op: policyengine.CondStringEquals,
+ Kind: policyengine.KindResource,
+ Key: "Department",
+ Value: "HR",
+ },
+ {
+ Op: policyengine.CondStringNotEquals,
+ Kind: policyengine.KindRequest,
+ Key: "Actor",
+ Value: "ownerA",
+ },
+ },
+ },
+ },
+ {
+ name: "Valid rule for object with conditions with action detail",
+ rule: "deny:QuotaLimitReached Object.Get ResourceCondition:Department=HR RequestCondition:Actor!=ownerA *",
+ expectRule: policyengine.Rule{
+ Status: policyengine.QuotaLimitReached,
+ Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}},
+ Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
+ Condition: []policyengine.Condition{
+ {
+ Op: policyengine.CondStringEquals,
+ Kind: policyengine.KindResource,
+ Key: "Department",
+ Value: "HR",
+ },
+ {
+ Op: policyengine.CondStringNotEquals,
+ Kind: policyengine.KindRequest,
+ Key: "Actor",
+ Value: "ownerA",
+ },
+ },
+ },
+ },
+ {
+ name: "Invalid rule with unknown status",
+ rule: "permit Object.Put *",
+ expectErr: errUnknownStatus,
+ },
+ {
+ name: "Invalid rule with unknown action",
+ rule: "allow Object.PutOut *",
+ expectErr: errUnknownAction,
+ },
+ {
+ name: "Invalid rule with unknown status detail",
+ rule: "deny:UnknownActionDetail Object.Put *",
+ expectErr: errUnknownStatusDetail,
+ },
+ {
+ name: "Invalid rule with unknown condition binary operator",
+ rule: "deny Object.Put ResourceCondition:Department
",
Short: "Generate documentation for this command",
@@ -65,11 +106,7 @@ In this case there is a number of helper functions which can be used:
case gendocMarkdown:
return doc.GenMarkdownTree(rootCmd, args[0])
case gendocMan:
- hdr := &doc.GenManHeader{
- Section: "1",
- Source: "NSPCC & Morphbits",
- }
- return doc.GenManTree(rootCmd, hdr, args[0])
+ return doc.GenManTree(rootCmd, opts.ManHeader, args[0])
default:
return errors.New("type must be 'md' or 'man'")
}
@@ -77,9 +114,9 @@ In this case there is a number of helper functions which can be used:
}
ff := gendocCmd.Flags()
- ff.StringP(gendocTypeFlag, "t", gendocMarkdown, "Type for the documentation ('md' or 'man')")
- ff.Int(depthFlag, 1, "If template is specified, unify all commands starting from depth in a single file. Default: 1.")
- ff.StringP(extensionFlag, "e", "", "If the template is specified, string to append to the output file names")
+ ff.String(opts.TypeFlag, gendocMarkdown, "Type for the documentation ('md' or 'man')")
+ ff.Int(opts.DepthFlag, 1, "If template is specified, unify all commands starting from depth in a single file. Default: 1.")
+ ff.String(opts.ExtensionFlag, "", "If the template is specified, string to append to the output file names")
return gendocCmd
}
diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go
index a9877e007f..8569ec7341 100644
--- a/pkg/util/http/calls.go
+++ b/pkg/util/http/calls.go
@@ -32,8 +32,8 @@ func (x *Server) Serve() error {
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to Serve method will have no effect.
-func (x *Server) Shutdown() error {
- ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout)
+func (x *Server) Shutdown(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout)
err := x.srv.Shutdown(ctx)
diff --git a/pkg/util/http/pprof.go b/pkg/util/http/pprof.go
index 7a04130001..f85fd2ea9b 100644
--- a/pkg/util/http/pprof.go
+++ b/pkg/util/http/pprof.go
@@ -3,8 +3,14 @@ package httputil
import (
"net/http"
"net/http/pprof"
+
+ "github.com/felixge/fgprof"
)
+func init() {
+ http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
+}
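+
+// A wall-clock profile can then be fetched from a running service, e.g.
+// (host and port are deployment-specific):
+//
+//	go tool pprof http://localhost:6060/debug/fgprof?seconds=10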
+
// initializes pprof package in order to
// register Prometheus handlers on http.DefaultServeMux.
var _ = pprof.Handler("")
diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go
index 923412a7f2..2589ab786c 100644
--- a/pkg/util/http/server.go
+++ b/pkg/util/http/server.go
@@ -76,8 +76,7 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server {
o(c)
}
- switch {
- case c.shutdownTimeout <= 0:
+ if c.shutdownTimeout <= 0 {
panicOnOptValue("shutdown timeout", c.shutdownTimeout)
}
diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go
index bc5f2e76eb..6337039a9b 100644
--- a/pkg/util/keyer/dashboard.go
+++ b/pkg/util/keyer/dashboard.go
@@ -1,12 +1,12 @@
package keyer
import (
- "crypto/elliptic"
"encoding/hex"
"fmt"
"os"
"text/tabwriter"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -45,7 +45,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) {
if d.pubKey != nil {
if uncompressed {
- data = elliptic.Marshal(elliptic.P256(), d.pubKey.X, d.pubKey.Y)
+ data = d.pubKey.UncompressedBytes()
} else {
data = d.pubKey.Bytes()
}
@@ -105,9 +105,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) {
func base58ToHex(data string) string {
val, err := base58.Decode(data)
- if err != nil {
- panic("produced incorrect base58 value")
- }
+ assert.NoError(err, "produced incorrect base58 value")
return hex.EncodeToString(val)
}
diff --git a/pkg/util/locode/column/coordinates.go b/pkg/util/locode/column/coordinates.go
deleted file mode 100644
index 5e32c016e1..0000000000
--- a/pkg/util/locode/column/coordinates.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package locodecolumn
-
-import (
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
-)
-
-const (
- minutesDigits = 2
- hemisphereSymbols = 1
-)
-
-const (
- latDegDigits = 2
- lngDegDigits = 3
-)
-
-type coordinateCode struct {
- degDigits int
- value []uint8
-}
-
-// LongitudeCode represents the value of the longitude
-// of the location conforming to UN/LOCODE specification.
-type LongitudeCode coordinateCode
-
-// LongitudeHemisphere represents the hemisphere of the earth
-// // along the Greenwich meridian.
-type LongitudeHemisphere [hemisphereSymbols]uint8
-
-// LatitudeCode represents the value of the latitude
-// of the location conforming to UN/LOCODE specification.
-type LatitudeCode coordinateCode
-
-// LatitudeHemisphere represents the hemisphere of the earth
-// along the equator.
-type LatitudeHemisphere [hemisphereSymbols]uint8
-
-func coordinateFromString(s string, degDigits int, hemisphereAlphabet []uint8) (*coordinateCode, error) {
- if len(s) != degDigits+minutesDigits+hemisphereSymbols {
- return nil, locode.ErrInvalidString
- }
-
- for i := range s[:degDigits+minutesDigits] {
- if !isDigit(s[i]) {
- return nil, locode.ErrInvalidString
- }
- }
-
-loop:
- for _, sym := range s[degDigits+minutesDigits:] {
- for j := range hemisphereAlphabet {
- if hemisphereAlphabet[j] == uint8(sym) {
- continue loop
- }
- }
-
- return nil, locode.ErrInvalidString
- }
-
- return &coordinateCode{
- degDigits: degDigits,
- value: []uint8(s),
- }, nil
-}
-
-// LongitudeFromString parses a string and returns the location's longitude.
-func LongitudeFromString(s string) (*LongitudeCode, error) {
- cc, err := coordinateFromString(s, lngDegDigits, []uint8{'W', 'E'})
- if err != nil {
- return nil, err
- }
-
- return (*LongitudeCode)(cc), nil
-}
-
-// LatitudeFromString parses a string and returns the location's latitude.
-func LatitudeFromString(s string) (*LatitudeCode, error) {
- cc, err := coordinateFromString(s, latDegDigits, []uint8{'N', 'S'})
- if err != nil {
- return nil, err
- }
-
- return (*LatitudeCode)(cc), nil
-}
-
-func (cc *coordinateCode) degrees() []uint8 {
- return cc.value[:cc.degDigits]
-}
-
-// Degrees returns the longitude's degrees.
-func (lc *LongitudeCode) Degrees() (l [lngDegDigits]uint8) {
- copy(l[:], (*coordinateCode)(lc).degrees())
- return
-}
-
-// Degrees returns the latitude's degrees.
-func (lc *LatitudeCode) Degrees() (l [latDegDigits]uint8) {
- copy(l[:], (*coordinateCode)(lc).degrees())
- return
-}
-
-func (cc *coordinateCode) minutes() (mnt [minutesDigits]uint8) {
- for i := 0; i < minutesDigits; i++ {
- mnt[i] = cc.value[cc.degDigits+i]
- }
-
- return
-}
-
-// Minutes returns the longitude's minutes.
-func (lc *LongitudeCode) Minutes() [minutesDigits]uint8 {
- return (*coordinateCode)(lc).minutes()
-}
-
-// Minutes returns the latitude's minutes.
-func (lc *LatitudeCode) Minutes() [minutesDigits]uint8 {
- return (*coordinateCode)(lc).minutes()
-}
-
-// Hemisphere returns the longitude's hemisphere code.
-func (lc *LongitudeCode) Hemisphere() LongitudeHemisphere {
- return (*coordinateCode)(lc).hemisphere()
-}
-
-// Hemisphere returns the latitude's hemisphere code.
-func (lc *LatitudeCode) Hemisphere() LatitudeHemisphere {
- return (*coordinateCode)(lc).hemisphere()
-}
-
-func (cc *coordinateCode) hemisphere() (h [hemisphereSymbols]uint8) {
- for i := 0; i < hemisphereSymbols; i++ {
- h[i] = cc.value[cc.degDigits+minutesDigits+i]
- }
-
- return h
-}
-
-// North returns true for the northern hemisphere.
-func (h LatitudeHemisphere) North() bool {
- return h[0] == 'N'
-}
-
-// East returns true for the eastern hemisphere.
-func (h LongitudeHemisphere) East() bool {
- return h[0] == 'E'
-}
-
-// Coordinates represents the coordinates of the location from UN/LOCODE table.
-type Coordinates struct {
- lat *LatitudeCode
-
- lng *LongitudeCode
-}
-
-// Latitude returns the location's latitude.
-func (c *Coordinates) Latitude() *LatitudeCode {
- return c.lat
-}
-
-// Longitude returns the location's longitude.
-func (c *Coordinates) Longitude() *LongitudeCode {
- return c.lng
-}
-
-// CoordinatesFromString parses a string and returns the location's coordinates.
-func CoordinatesFromString(s string) (*Coordinates, error) {
- if len(s) == 0 {
- return nil, nil
- }
-
- strs := strings.Split(s, " ")
- if len(strs) != 2 {
- return nil, locode.ErrInvalidString
- }
-
- lat, err := LatitudeFromString(strs[0])
- if err != nil {
- return nil, fmt.Errorf("could not parse latitude: %w", err)
- }
-
- lng, err := LongitudeFromString(strs[1])
- if err != nil {
- return nil, fmt.Errorf("could not parse longitude: %w", err)
- }
-
- return &Coordinates{
- lat: lat,
- lng: lng,
- }, nil
-}
diff --git a/pkg/util/locode/column/country.go b/pkg/util/locode/column/country.go
deleted file mode 100644
index 7b29a97c5e..0000000000
--- a/pkg/util/locode/column/country.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package locodecolumn
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
-)
-
-const countryCodeLen = 2
-
-// CountryCode represents ISO 3166 alpha-2 Country Code.
-type CountryCode [countryCodeLen]uint8
-
-// Symbols returns digits of the country code.
-func (cc *CountryCode) Symbols() [countryCodeLen]uint8 {
- return *cc
-}
-
-// CountryCodeFromString parses a string and returns the country code.
-func CountryCodeFromString(s string) (*CountryCode, error) {
- if l := len(s); l != countryCodeLen {
- return nil, fmt.Errorf("incorrect country code length: expect: %d, got: %d",
- countryCodeLen,
- l,
- )
- }
-
- for i := range s {
- if !isUpperAlpha(s[i]) {
- return nil, locode.ErrInvalidString
- }
- }
-
- cc := CountryCode{}
- copy(cc[:], s)
-
- return &cc, nil
-}
diff --git a/pkg/util/locode/column/location.go b/pkg/util/locode/column/location.go
deleted file mode 100644
index 4303228fb7..0000000000
--- a/pkg/util/locode/column/location.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package locodecolumn
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
-)
-
-const locationCodeLen = 3
-
-// LocationCode represents 3-character code for the location.
-type LocationCode [locationCodeLen]uint8
-
-// Symbols returns characters of the location code.
-func (lc *LocationCode) Symbols() [locationCodeLen]uint8 {
- return *lc
-}
-
-// LocationCodeFromString parses a string and returns the location code.
-func LocationCodeFromString(s string) (*LocationCode, error) {
- if l := len(s); l != locationCodeLen {
- return nil, fmt.Errorf("incorrect location code length: expect: %d, got: %d",
- locationCodeLen,
- l,
- )
- }
-
- for i := range s {
- if !isUpperAlpha(s[i]) && !isDigit(s[i]) {
- return nil, locode.ErrInvalidString
- }
- }
-
- lc := LocationCode{}
- copy(lc[:], s)
-
- return &lc, nil
-}
diff --git a/pkg/util/locode/column/util.go b/pkg/util/locode/column/util.go
deleted file mode 100644
index 8da1f9a257..0000000000
--- a/pkg/util/locode/column/util.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package locodecolumn
-
-func isDigit(sym uint8) bool {
- return sym >= '0' && sym <= '9'
-}
-
-func isUpperAlpha(sym uint8) bool {
- return sym >= 'A' && sym <= 'Z'
-}
diff --git a/pkg/util/locode/db/airports/calls.go b/pkg/util/locode/db/airports/calls.go
deleted file mode 100644
index dac8cce8b8..0000000000
--- a/pkg/util/locode/db/airports/calls.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package airportsdb
-
-import (
- "encoding/csv"
- "errors"
- "fmt"
- "io"
- "os"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
-)
-
-const (
- _ = iota - 1
-
- _ // Airport ID
- _ // Name
- airportCity
- airportCountry
- airportIATA
- _ // ICAO
- airportLatitude
- airportLongitude
- _ // Altitude
- _ // Timezone
- _ // DST
- _ // Tz database time zone
- _ // Type
- _ // Source
-
- airportFldNum
-)
-
-type record struct {
- city,
- country,
- iata,
- lat,
- lng string
-}
-
-// Get scans the records of the OpenFlights Airport to an in-memory table (once),
-// and returns an entry that matches the passed UN/LOCODE record.
-//
-// Records are matched if they have the same country code and either
-// same IATA code or same city name (location name in UN/LOCODE).
-//
-// Returns locodedb.ErrAirportNotFound if no entry matches.
-func (db *DB) Get(locodeRecord locode.Record) (*locodedb.AirportRecord, error) {
- if err := db.initAirports(); err != nil {
- return nil, err
- }
-
- records := db.mAirports[locodeRecord.LOCODE.CountryCode()]
-
- for i := range records {
- if locodeRecord.LOCODE.LocationCode() != records[i].iata &&
- locodeRecord.NameWoDiacritics != records[i].city {
- continue
- }
-
- lat, err := strconv.ParseFloat(records[i].lat, 64)
- if err != nil {
- return nil, err
- }
-
- lng, err := strconv.ParseFloat(records[i].lng, 64)
- if err != nil {
- return nil, err
- }
-
- return &locodedb.AirportRecord{
- CountryName: records[i].country,
- Point: locodedb.NewPoint(lat, lng),
- }, nil
- }
-
- return nil, locodedb.ErrAirportNotFound
-}
-
-const (
- _ = iota - 1
-
- countryName
- countryISOCode
- _ // dafif_code
-
- countryFldNum
-)
-
-// CountryName scans the records of the OpenFlights Country table to an in-memory table (once),
-// and returns the name of the country by code.
-//
-// Returns locodedb.ErrCountryNotFound if no entry matches.
-func (db *DB) CountryName(code *locodedb.CountryCode) (name string, err error) {
- if err = db.initCountries(); err != nil {
- return
- }
-
- argCode := code.String()
-
- for cName, cCode := range db.mCountries {
- if cCode == argCode {
- name = cName
- break
- }
- }
-
- if name == "" {
- err = locodedb.ErrCountryNotFound
- }
-
- return
-}
-
-func (db *DB) initAirports() (err error) {
- db.airportsOnce.Do(func() {
- db.mAirports = make(map[string][]record)
-
- if err = db.initCountries(); err != nil {
- return
- }
-
- err = db.scanWords(db.airports, airportFldNum, func(words []string) error {
- countryCode := db.mCountries[words[airportCountry]]
- if countryCode != "" {
- db.mAirports[countryCode] = append(db.mAirports[countryCode], record{
- city: words[airportCity],
- country: words[airportCountry],
- iata: words[airportIATA],
- lat: words[airportLatitude],
- lng: words[airportLongitude],
- })
- }
-
- return nil
- })
- })
-
- return
-}
-
-func (db *DB) initCountries() (err error) {
- db.countriesOnce.Do(func() {
- db.mCountries = make(map[string]string)
-
- err = db.scanWords(db.countries, countryFldNum, func(words []string) error {
- db.mCountries[words[countryName]] = words[countryISOCode]
-
- return nil
- })
- })
-
- return
-}
-
-var errScanInt = errors.New("interrupt scan")
-
-func (db *DB) scanWords(pm pathMode, num int, wordsHandler func([]string) error) error {
- tableFile, err := os.OpenFile(pm.path, os.O_RDONLY, pm.mode)
- if err != nil {
- return err
- }
-
- defer tableFile.Close()
-
- r := csv.NewReader(tableFile)
- r.ReuseRecord = true
-
- for {
- words, err := r.Read()
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return err
- } else if ln := len(words); ln != num {
- return fmt.Errorf("unexpected number of words %d", ln)
- }
-
- if err := wordsHandler(words); err != nil {
- if errors.Is(err, errScanInt) {
- break
- }
-
- return err
- }
- }
-
- return nil
-}
diff --git a/pkg/util/locode/db/airports/db.go b/pkg/util/locode/db/airports/db.go
deleted file mode 100644
index acfa3fd607..0000000000
--- a/pkg/util/locode/db/airports/db.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package airportsdb
-
-import (
- "fmt"
- "io/fs"
- "sync"
-)
-
-// Prm groups the required parameters of the DB's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to OpenFlights Airport csv table.
- //
- // Must not be empty.
- AirportsPath string
-
- // Path to OpenFlights Countries csv table.
- //
- // Must not be empty.
- CountriesPath string
-}
-
-// DB is a descriptor of the OpenFlights database in csv format.
-//
-// For correct operation, DB must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// The DB is immediately ready to work through API.
-type DB struct {
- airports, countries pathMode
-
- airportsOnce, countriesOnce sync.Once
-
- mCountries map[string]string
-
- mAirports map[string][]record
-}
-
-type pathMode struct {
- path string
- mode fs.FileMode
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the DB.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created DB does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *DB {
- switch {
- case prm.AirportsPath == "":
- panicOnPrmValue("AirportsPath", prm.AirportsPath)
- case prm.CountriesPath == "":
- panicOnPrmValue("CountriesPath", prm.CountriesPath)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &DB{
- airports: pathMode{
- path: prm.AirportsPath,
- mode: o.airportMode,
- },
- countries: pathMode{
- path: prm.CountriesPath,
- mode: o.countryMode,
- },
- }
-}
diff --git a/pkg/util/locode/db/airports/opts.go b/pkg/util/locode/db/airports/opts.go
deleted file mode 100644
index 3799d9e270..0000000000
--- a/pkg/util/locode/db/airports/opts.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package airportsdb
-
-import (
- "io/fs"
-)
-
-// Option sets an optional parameter of DB.
-type Option func(*options)
-
-type options struct {
- airportMode, countryMode fs.FileMode
-}
-
-func defaultOpts() *options {
- return &options{
- airportMode: fs.ModePerm, // 0777
- countryMode: fs.ModePerm, // 0777
- }
-}
diff --git a/pkg/util/locode/db/boltdb/calls.go b/pkg/util/locode/db/boltdb/calls.go
deleted file mode 100644
index 171808af25..0000000000
--- a/pkg/util/locode/db/boltdb/calls.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package locodebolt
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "path/filepath"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- "go.etcd.io/bbolt"
-)
-
-// Open opens an underlying BoltDB instance.
-//
-// Timeout of BoltDB opening is 3s (only for Linux or Darwin).
-//
-// Opens BoltDB in read-only mode if DB is read-only.
-func (db *DB) Open() error {
- // copy-paste from metabase:
- // consider universal Open/Close for BoltDB wrappers
-
- err := util.MkdirAllX(filepath.Dir(db.path), db.mode)
- if err != nil {
- return fmt.Errorf("could not create dir for BoltDB: %w", err)
- }
-
- db.bolt, err = bbolt.Open(db.path, db.mode, db.boltOpts)
- if err != nil {
- return fmt.Errorf("could not open BoltDB: %w", err)
- }
-
- return nil
-}
-
-// Close closes an underlying BoltDB instance.
-//
-// Must not be called before successful Open call.
-func (db *DB) Close() error {
- return db.bolt.Close()
-}
-
-func countryBucketKey(cc *locodedb.CountryCode) ([]byte, error) {
- return []byte(cc.String()), nil
-}
-
-func locationBucketKey(lc *locodedb.LocationCode) ([]byte, error) {
- return []byte(lc.String()), nil
-}
-
-type recordJSON struct {
- CountryName string
- LocationName string
- SubDivName string
- SubDivCode string
- Latitude float64
- Longitude float64
- Continent string
-}
-
-func recordValue(r locodedb.Record) ([]byte, error) {
- p := r.GeoPoint()
-
- rj := &recordJSON{
- CountryName: r.CountryName(),
- LocationName: r.LocationName(),
- SubDivName: r.SubDivName(),
- SubDivCode: r.SubDivCode(),
- Latitude: p.Latitude(),
- Longitude: p.Longitude(),
- Continent: r.Continent().String(),
- }
-
- return json.Marshal(rj)
-}
-
-func recordFromValue(data []byte) (*locodedb.Record, error) {
- rj := new(recordJSON)
-
- if err := json.Unmarshal(data, rj); err != nil {
- return nil, err
- }
-
- r := new(locodedb.Record)
- r.SetCountryName(rj.CountryName)
- r.SetLocationName(rj.LocationName)
- r.SetSubDivName(rj.SubDivName)
- r.SetSubDivCode(rj.SubDivCode)
- r.SetGeoPoint(locodedb.NewPoint(rj.Latitude, rj.Longitude))
-
- cont := locodedb.ContinentFromString(rj.Continent)
- r.SetContinent(&cont)
-
- return r, nil
-}
-
-// Put saves the record by key in an underlying BoltDB instance.
-//
-// Country code from the key is used for allocating the 1st level buckets.
-// Records are stored in country buckets by the location code from the key.
-// The records are stored in internal binary JSON format.
-//
-// Must not be called before successful Open call.
-// Must not be called in read-only mode: behavior is undefined.
-func (db *DB) Put(key locodedb.Key, rec locodedb.Record) error {
- return db.bolt.Update(func(tx *bbolt.Tx) error {
- countryKey, err := countryBucketKey(key.CountryCode())
- if err != nil {
- return err
- }
-
- bktCountry, err := tx.CreateBucketIfNotExists(countryKey)
- if err != nil {
- return fmt.Errorf("could not create country bucket: %w", err)
- }
-
- locationKey, err := locationBucketKey(key.LocationCode())
- if err != nil {
- return err
- }
-
- cont, err := recordValue(rec)
- if err != nil {
- return err
- }
-
- return bktCountry.Put(locationKey, cont)
- })
-}
-
-var errRecordNotFound = errors.New("record not found")
-
-// Get reads the record by key from underlying BoltDB instance.
-//
-// Returns an error if no record is presented by key in DB.
-//
-// Must not be called before successful Open call.
-func (db *DB) Get(key locodedb.Key) (rec *locodedb.Record, err error) {
- err = db.bolt.View(func(tx *bbolt.Tx) error {
- countryKey, err := countryBucketKey(key.CountryCode())
- if err != nil {
- return err
- }
-
- bktCountry := tx.Bucket(countryKey)
- if bktCountry == nil {
- return errRecordNotFound
- }
-
- locationKey, err := locationBucketKey(key.LocationCode())
- if err != nil {
- return err
- }
-
- data := bktCountry.Get(locationKey)
- if data == nil {
- return errRecordNotFound
- }
-
- rec, err = recordFromValue(data)
-
- return err
- })
-
- return
-}
diff --git a/pkg/util/locode/db/boltdb/db.go b/pkg/util/locode/db/boltdb/db.go
deleted file mode 100644
index 3d09a797da..0000000000
--- a/pkg/util/locode/db/boltdb/db.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package locodebolt
-
-import (
- "fmt"
- "io/fs"
-
- "go.etcd.io/bbolt"
-)
-
-// Prm groups the required parameters of the DB's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to BoltDB file with FrostFS location database.
- //
- // Must not be empty.
- Path string
-}
-
-// DB is a descriptor of the FrostFS BoltDB location database.
-//
-// For correct operation, DB must be created
-// using the constructor (New) based on the required parameters
-// and optional components.
-//
-// After successful creation,
-// DB must be opened through Open call. After successful opening,
-// DB is ready to work through API (until Close call).
-//
-// Upon completion of work with the DB, it must be closed
-// by Close method.
-type DB struct {
- path string
-
- mode fs.FileMode
-
- boltOpts *bbolt.Options
-
- bolt *bbolt.DB
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the DB.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created DB requires calling the Open method in order
-// to initialize required resources.
-func New(prm Prm, opts ...Option) *DB {
- switch {
- case prm.Path == "":
- panicOnPrmValue("Path", prm.Path)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &DB{
- path: prm.Path,
- mode: o.mode,
- boltOpts: o.boltOpts,
- }
-}
diff --git a/pkg/util/locode/db/boltdb/opts.go b/pkg/util/locode/db/boltdb/opts.go
deleted file mode 100644
index db0cccd3a0..0000000000
--- a/pkg/util/locode/db/boltdb/opts.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package locodebolt
-
-import (
- "io/fs"
- "os"
- "time"
-
- "go.etcd.io/bbolt"
-)
-
-// Option sets an optional parameter of DB.
-type Option func(*options)
-
-type options struct {
- mode fs.FileMode
-
- boltOpts *bbolt.Options
-}
-
-func defaultOpts() *options {
- return &options{
- mode: os.ModePerm, // 0777
- boltOpts: &bbolt.Options{
- Timeout: 3 * time.Second,
- },
- }
-}
-
-// ReadOnly enables read-only mode of the DB.
-//
-// Do not call DB.Put method on instances with
-// this option: the behavior is undefined.
-func ReadOnly() Option {
- return func(o *options) {
- o.boltOpts.ReadOnly = true
- }
-}
diff --git a/pkg/util/locode/db/continent.go b/pkg/util/locode/db/continent.go
deleted file mode 100644
index 863af7b571..0000000000
--- a/pkg/util/locode/db/continent.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package locodedb
-
-// Continent is an enumeration of Earth's continent.
-type Continent uint8
-
-const (
- // ContinentUnknown is an undefined Continent value.
- ContinentUnknown = iota
-
- // ContinentEurope corresponds to Europe.
- ContinentEurope
-
- // ContinentAfrica corresponds to Africa.
- ContinentAfrica
-
- // ContinentNorthAmerica corresponds to North America.
- ContinentNorthAmerica
-
- // ContinentSouthAmerica corresponds to South America.
- ContinentSouthAmerica
-
- // ContinentAsia corresponds to Asia.
- ContinentAsia
-
- // ContinentAntarctica corresponds to Antarctica.
- ContinentAntarctica
-
- // ContinentOceania corresponds to Oceania.
- ContinentOceania
-)
-
-// Is checks if c is the same continent as c2.
-func (c *Continent) Is(c2 Continent) bool {
- return *c == c2
-}
-
-func (c Continent) String() string {
- switch c {
- case ContinentUnknown:
- fallthrough
- default:
- return "Unknown"
- case ContinentEurope:
- return "Europe"
- case ContinentAfrica:
- return "Africa"
- case ContinentNorthAmerica:
- return "North America"
- case ContinentSouthAmerica:
- return "South America"
- case ContinentAsia:
- return "Asia"
- case ContinentAntarctica:
- return "Antarctica"
- case ContinentOceania:
- return "Oceania"
- }
-}
-
-// ContinentFromString returns Continent value
-// corresponding to the passed string representation.
-func ContinentFromString(str string) Continent {
- switch str {
- default:
- return ContinentUnknown
- case "Europe":
- return ContinentEurope
- case "Africa":
- return ContinentAfrica
- case "North America":
- return ContinentNorthAmerica
- case "South America":
- return ContinentSouthAmerica
- case "Asia":
- return ContinentAsia
- case "Antarctica":
- return ContinentAntarctica
- case "Oceania":
- return ContinentOceania
- }
-}
diff --git a/pkg/util/locode/db/continents/geojson/calls.go b/pkg/util/locode/db/continents/geojson/calls.go
deleted file mode 100644
index 34467d5a25..0000000000
--- a/pkg/util/locode/db/continents/geojson/calls.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package continentsdb
-
-import (
- "fmt"
- "os"
-
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- "github.com/paulmach/orb"
- "github.com/paulmach/orb/geojson"
- "github.com/paulmach/orb/planar"
-)
-
-const continentProperty = "Continent"
-
-// PointContinent goes through all polygons and returns the continent
-// in which the point is located.
-//
-// Returns locodedb.ContinentUnknown if no entry matches.
-//
-// All GeoJSON feature are parsed from file once and stored in memory.
-func (db *DB) PointContinent(point *locodedb.Point) (*locodedb.Continent, error) {
- var err error
-
- db.once.Do(func() {
- err = db.init()
- })
-
- if err != nil {
- return nil, err
- }
-
- planarPoint := orb.Point{point.Longitude(), point.Latitude()}
-
- var (
- continent string
- minDst float64
- )
-
- for _, feature := range db.features {
- if multiPolygon, ok := feature.Geometry.(orb.MultiPolygon); ok {
- if planar.MultiPolygonContains(multiPolygon, planarPoint) {
- continent = feature.Properties.MustString(continentProperty)
- break
- }
- } else if polygon, ok := feature.Geometry.(orb.Polygon); ok {
- if planar.PolygonContains(polygon, planarPoint) {
- continent = feature.Properties.MustString(continentProperty)
- break
- }
- }
- distance := planar.DistanceFrom(feature.Geometry, planarPoint)
- if minDst == 0 || minDst > distance {
- minDst = distance
- continent = feature.Properties.MustString(continentProperty)
- }
- }
-
- c := continentFromString(continent)
-
- return &c, nil
-}
-
-func (db *DB) init() error {
- data, err := os.ReadFile(db.path)
- if err != nil {
- return fmt.Errorf("could not read data file: %w", err)
- }
-
- features, err := geojson.UnmarshalFeatureCollection(data)
- if err != nil {
- return fmt.Errorf("could not unmarshal GeoJSON feature collection: %w", err)
- }
-
- db.features = features.Features
-
- return nil
-}
-
-func continentFromString(c string) locodedb.Continent {
- switch c {
- default:
- return locodedb.ContinentUnknown
- case "Africa":
- return locodedb.ContinentAfrica
- case "Asia":
- return locodedb.ContinentAsia
- case "Europe":
- return locodedb.ContinentEurope
- case "North America":
- return locodedb.ContinentNorthAmerica
- case "South America":
- return locodedb.ContinentSouthAmerica
- case "Antarctica":
- return locodedb.ContinentAntarctica
- case "Australia", "Oceania":
- return locodedb.ContinentOceania
- }
-}
diff --git a/pkg/util/locode/db/continents/geojson/db.go b/pkg/util/locode/db/continents/geojson/db.go
deleted file mode 100644
index ee43bd8108..0000000000
--- a/pkg/util/locode/db/continents/geojson/db.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package continentsdb
-
-import (
- "fmt"
- "sync"
-
- "github.com/paulmach/orb/geojson"
-)
-
-// Prm groups the required parameters of the DB's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to polygons of Earth's continents in GeoJSON format.
- //
- // Must not be empty.
- Path string
-}
-
-// DB is a descriptor of the Earth's polygons in GeoJSON format.
-//
-// For correct operation, DB must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// The DB is immediately ready to work through API.
-type DB struct {
- path string
-
- once sync.Once
-
- features []*geojson.Feature
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the DB.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created DB does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *DB {
- switch {
- case prm.Path == "":
- panicOnPrmValue("Path", prm.Path)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &DB{
- path: prm.Path,
- }
-}
diff --git a/pkg/util/locode/db/continents/geojson/opts.go b/pkg/util/locode/db/continents/geojson/opts.go
deleted file mode 100644
index 59831fcc5f..0000000000
--- a/pkg/util/locode/db/continents/geojson/opts.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package continentsdb
-
-// Option sets an optional parameter of DB.
-type Option func(*options)
-
-type options struct{}
-
-func defaultOpts() *options {
- return &options{}
-}
diff --git a/pkg/util/locode/db/country.go b/pkg/util/locode/db/country.go
deleted file mode 100644
index 2d13c6ef97..0000000000
--- a/pkg/util/locode/db/country.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package locodedb
-
-import (
- "fmt"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// CountryCode represents a country code for
-// the storage in the FrostFS location database.
-type CountryCode locodecolumn.CountryCode
-
-// CountryCodeFromString parses a string UN/LOCODE country code
-// and returns a CountryCode.
-func CountryCodeFromString(s string) (*CountryCode, error) {
- cc, err := locodecolumn.CountryCodeFromString(s)
- if err != nil {
- return nil, fmt.Errorf("could not parse country code: %w", err)
- }
-
- return CountryFromColumn(cc)
-}
-
-// CountryFromColumn converts a UN/LOCODE country code to a CountryCode.
-func CountryFromColumn(cc *locodecolumn.CountryCode) (*CountryCode, error) {
- return (*CountryCode)(cc), nil
-}
-
-func (c *CountryCode) String() string {
- syms := (*locodecolumn.CountryCode)(c).Symbols()
- return string(syms[:])
-}
diff --git a/pkg/util/locode/db/db.go b/pkg/util/locode/db/db.go
deleted file mode 100644
index 2a0f26689f..0000000000
--- a/pkg/util/locode/db/db.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package locodedb
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
-)
-
-// SourceTable is an interface of the UN/LOCODE table.
-type SourceTable interface {
- // Must iterate over all entries of the table
- // and pass next entry to the handler.
- //
- // Must return handler's errors directly.
- IterateAll(func(locode.Record) error) error
-}
-
-// DB is an interface of FrostFS location database.
-type DB interface {
- // Must save the record by key in the database.
- Put(Key, Record) error
-
- // Must return the record by key from the database.
- Get(Key) (*Record, error)
-}
-
-// AirportRecord represents the entry in FrostFS airport database.
-type AirportRecord struct {
- // Name of the country where airport is located.
- CountryName string
-
- // Geo point where airport is located.
- Point *Point
-}
-
-// ErrAirportNotFound is returned by AirportRecord readers
-// when the required airport is not found.
-var ErrAirportNotFound = errors.New("airport not found")
-
-// AirportDB is an interface of FrostFS airport database.
-type AirportDB interface {
- // Must return the record by UN/LOCODE table record.
- //
- // Must return ErrAirportNotFound if there is no
- // related airport in the database.
- Get(locode.Record) (*AirportRecord, error)
-}
-
-// ContinentsDB is an interface of FrostFS continent database.
-type ContinentsDB interface {
- // Must return continent of the geo point.
- PointContinent(*Point) (*Continent, error)
-}
-
-var ErrSubDivNotFound = errors.New("subdivision not found")
-
-var ErrCountryNotFound = errors.New("country not found")
-
-// NamesDB is an interface of the FrostFS location namespace.
-type NamesDB interface {
- // Must resolve a country code to a country name.
- //
- // Must return ErrCountryNotFound if there is no
- // country with the provided code.
- CountryName(*CountryCode) (string, error)
-
- // Must resolve (country code, subdivision code) to
- // a subdivision name.
- //
- // Must return ErrSubDivNotFound if either country or
- // subdivision is not presented in database.
- SubDivName(*CountryCode, string) (string, error)
-}
-
-// FillDatabase generates the FrostFS location database based on the UN/LOCODE table.
-func FillDatabase(table SourceTable, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error {
- return table.IterateAll(func(tableRecord locode.Record) error {
- if tableRecord.LOCODE.LocationCode() == "" {
- return nil
- }
-
- dbKey, err := NewKey(tableRecord.LOCODE)
- if err != nil {
- return err
- }
-
- dbRecord, err := NewRecord(tableRecord)
- if err != nil {
- if errors.Is(err, errParseCoordinates) {
- return nil
- }
-
- return err
- }
-
- geoPoint := dbRecord.GeoPoint()
- countryName := ""
-
- if geoPoint == nil {
- airportRecord, err := airports.Get(tableRecord)
- if err != nil {
- if errors.Is(err, ErrAirportNotFound) {
- return nil
- }
-
- return err
- }
-
- geoPoint = airportRecord.Point
- countryName = airportRecord.CountryName
- }
-
- dbRecord.SetGeoPoint(geoPoint)
-
- if countryName == "" {
- countryName, err = names.CountryName(dbKey.CountryCode())
- if err != nil {
- if errors.Is(err, ErrCountryNotFound) {
- return nil
- }
-
- return err
- }
- }
-
- dbRecord.SetCountryName(countryName)
-
- if subDivCode := dbRecord.SubDivCode(); subDivCode != "" {
- subDivName, err := names.SubDivName(dbKey.CountryCode(), subDivCode)
- if err != nil {
- if errors.Is(err, ErrSubDivNotFound) {
- return nil
- }
-
- return err
- }
-
- dbRecord.SetSubDivName(subDivName)
- }
-
- continent, err := continents.PointContinent(geoPoint)
- if err != nil {
- return fmt.Errorf("could not calculate continent geo point: %w", err)
- } else if continent.Is(ContinentUnknown) {
- return nil
- }
-
- dbRecord.SetContinent(continent)
-
- return db.Put(*dbKey, *dbRecord)
- })
-}
-
-// LocodeRecord returns the record from the FrostFS location database
-// corresponding to the string representation of UN/LOCODE.
-func LocodeRecord(db DB, sLocode string) (*Record, error) {
- lc, err := locode.FromString(sLocode)
- if err != nil {
- return nil, fmt.Errorf("could not parse locode: %w", err)
- }
-
- key, err := NewKey(*lc)
- if err != nil {
- return nil, err
- }
-
- return db.Get(*key)
-}
diff --git a/pkg/util/locode/db/location.go b/pkg/util/locode/db/location.go
deleted file mode 100644
index d22979170b..0000000000
--- a/pkg/util/locode/db/location.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package locodedb
-
-import (
- "fmt"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// LocationCode represents a location code for
-// the storage in the FrostFS location database.
-type LocationCode locodecolumn.LocationCode
-
-// LocationCodeFromString parses a string UN/LOCODE location code
-// and returns a LocationCode.
-func LocationCodeFromString(s string) (*LocationCode, error) {
- lc, err := locodecolumn.LocationCodeFromString(s)
- if err != nil {
- return nil, fmt.Errorf("could not parse location code: %w", err)
- }
-
- return LocationFromColumn(lc)
-}
-
-// LocationFromColumn converts a UN/LOCODE country code to a LocationCode.
-func LocationFromColumn(cc *locodecolumn.LocationCode) (*LocationCode, error) {
- return (*LocationCode)(cc), nil
-}
-
-func (l *LocationCode) String() string {
- syms := (*locodecolumn.LocationCode)(l).Symbols()
- return string(syms[:])
-}
diff --git a/pkg/util/locode/db/point.go b/pkg/util/locode/db/point.go
deleted file mode 100644
index 72daebb2c5..0000000000
--- a/pkg/util/locode/db/point.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package locodedb
-
-import (
- "fmt"
- "strconv"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// Point represents a 2D geographic point.
-type Point struct {
- lat, lng float64
-}
-
-// NewPoint creates, initializes and returns a new Point.
-func NewPoint(lat, lng float64) *Point {
- return &Point{
- lat: lat,
- lng: lng,
- }
-}
-
-// Latitude returns the Point's latitude.
-func (p Point) Latitude() float64 {
- return p.lat
-}
-
-// Longitude returns the Point's longitude.
-func (p Point) Longitude() float64 {
- return p.lng
-}
-
-// PointFromCoordinates converts a UN/LOCODE coordinates to a Point.
-func PointFromCoordinates(crd *locodecolumn.Coordinates) (*Point, error) {
- if crd == nil {
- return nil, nil
- }
-
- cLat := crd.Latitude()
- cLatDeg := cLat.Degrees()
- cLatMnt := cLat.Minutes()
-
- lat, err := toDecimal(cLatDeg[:], cLatMnt[:])
- if err != nil {
- return nil, fmt.Errorf("could not parse latitude: %w", err)
- }
-
- if !cLat.Hemisphere().North() {
- lat = -lat
- }
-
- cLng := crd.Longitude()
- cLngDeg := cLng.Degrees()
- cLngMnt := cLng.Minutes()
-
- lng, err := toDecimal(cLngDeg[:], cLngMnt[:])
- if err != nil {
- return nil, fmt.Errorf("could not parse longitude: %w", err)
- }
-
- if !cLng.Hemisphere().East() {
- lng = -lng
- }
-
- return &Point{
- lat: lat,
- lng: lng,
- }, nil
-}
-
-func toDecimal(intRaw, minutesRaw []byte) (float64, error) {
- integer, err := strconv.ParseFloat(string(intRaw), 64)
- if err != nil {
- return 0, fmt.Errorf("could not parse integer part: %w", err)
- }
-
- decimal, err := minutesToDegrees(minutesRaw)
- if err != nil {
- return 0, fmt.Errorf("could not parse decimal part: %w", err)
- }
-
- return integer + decimal, nil
-}
-
-// minutesToDegrees converts minutes to decimal part of a degree.
-func minutesToDegrees(raw []byte) (float64, error) {
- minutes, err := strconv.ParseFloat(string(raw), 64)
- if err != nil {
- return 0, err
- }
-
- return minutes / 60, nil
-}
diff --git a/pkg/util/locode/db/point_test.go b/pkg/util/locode/db/point_test.go
deleted file mode 100644
index f91c0cf87f..0000000000
--- a/pkg/util/locode/db/point_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package locodedb
-
-import (
- "testing"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
- "github.com/stretchr/testify/require"
-)
-
-func TestPointFromCoordinates(t *testing.T) {
- testCases := []struct {
- latGot, longGot string
- latWant, longWant float64
- }{
- {
- latGot: "5915N",
- longGot: "01806E",
- latWant: 59.25,
- longWant: 18.10,
- },
- {
- latGot: "1000N",
- longGot: "02030E",
- latWant: 10.00,
- longWant: 20.50,
- },
- {
- latGot: "0145S",
- longGot: "03512W",
- latWant: -01.75,
- longWant: -35.20,
- },
- }
-
- var (
- crd *locodecolumn.Coordinates
- point *Point
- err error
- )
-
- for _, test := range testCases {
- crd, err = locodecolumn.CoordinatesFromString(test.latGot + " " + test.longGot)
- require.NoError(t, err)
-
- point, err = PointFromCoordinates(crd)
- require.NoError(t, err)
-
- require.Equal(t, test.latWant, point.Latitude())
- require.Equal(t, test.longWant, point.Longitude())
- }
-}
diff --git a/pkg/util/locode/db/record.go b/pkg/util/locode/db/record.go
deleted file mode 100644
index 4c414079f5..0000000000
--- a/pkg/util/locode/db/record.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package locodedb
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// Key represents the key in FrostFS location database.
-type Key struct {
- cc *CountryCode
-
- lc *LocationCode
-}
-
-// NewKey calculates Key from LOCODE.
-func NewKey(lc locode.LOCODE) (*Key, error) {
- country, err := CountryCodeFromString(lc.CountryCode())
- if err != nil {
- return nil, fmt.Errorf("could not parse country: %w", err)
- }
-
- location, err := LocationCodeFromString(lc.LocationCode())
- if err != nil {
- return nil, fmt.Errorf("could not parse location: %w", err)
- }
-
- return &Key{
- cc: country,
- lc: location,
- }, nil
-}
-
-// CountryCode returns the location's country code.
-func (k *Key) CountryCode() *CountryCode {
- return k.cc
-}
-
-// LocationCode returns the location code.
-func (k *Key) LocationCode() *LocationCode {
- return k.lc
-}
-
-// Record represents the entry in FrostFS location database.
-type Record struct {
- countryName string
-
- locationName string
-
- subDivName string
-
- subDivCode string
-
- p *Point
-
- cont *Continent
-}
-
-var errParseCoordinates = errors.New("invalid coordinates")
-
-// NewRecord calculates the Record from the UN/LOCODE table record.
-func NewRecord(r locode.Record) (*Record, error) {
- crd, err := locodecolumn.CoordinatesFromString(r.Coordinates)
- if err != nil {
- return nil, fmt.Errorf("%w: %v", errParseCoordinates, err)
- }
-
- point, err := PointFromCoordinates(crd)
- if err != nil {
- return nil, fmt.Errorf("could not parse geo point: %w", err)
- }
-
- return &Record{
- locationName: r.NameWoDiacritics,
- subDivCode: r.SubDiv,
- p: point,
- }, nil
-}
-
-// CountryName returns the country name.
-func (r *Record) CountryName() string {
- return r.countryName
-}
-
-// SetCountryName sets the country name.
-func (r *Record) SetCountryName(name string) {
- r.countryName = name
-}
-
-// LocationName returns the location name.
-func (r *Record) LocationName() string {
- return r.locationName
-}
-
-// SetLocationName sets the location name.
-func (r *Record) SetLocationName(name string) {
- r.locationName = name
-}
-
-// SubDivCode returns the subdivision code.
-func (r *Record) SubDivCode() string {
- return r.subDivCode
-}
-
-// SetSubDivCode sets the subdivision code.
-func (r *Record) SetSubDivCode(name string) {
- r.subDivCode = name
-}
-
-// SubDivName returns the subdivision name.
-func (r *Record) SubDivName() string {
- return r.subDivName
-}
-
-// SetSubDivName sets the subdivision name.
-func (r *Record) SetSubDivName(name string) {
- r.subDivName = name
-}
-
-// GeoPoint returns geo point of the location.
-func (r *Record) GeoPoint() *Point {
- return r.p
-}
-
-// SetGeoPoint sets geo point of the location.
-func (r *Record) SetGeoPoint(p *Point) {
- r.p = p
-}
-
-// Continent returns the location continent.
-func (r *Record) Continent() *Continent {
- return r.cont
-}
-
-// SetContinent sets the location continent.
-func (r *Record) SetContinent(c *Continent) {
- r.cont = c
-}
diff --git a/pkg/util/locode/record.go b/pkg/util/locode/record.go
deleted file mode 100644
index 7db746ff3b..0000000000
--- a/pkg/util/locode/record.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package locode
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-// LOCODE represents code from UN/LOCODE coding scheme.
-type LOCODE [2]string
-
-// Record represents a single record of the UN/LOCODE table.
-type Record struct {
- // Change Indicator.
- Ch string
-
- // Combination of a 2-character country code and a 3-character location code.
- LOCODE LOCODE
-
- // Name of the locations which has been allocated a UN/LOCODE.
- Name string
-
- // Names of the locations which have been allocated a UN/LOCODE without diacritic signs.
- NameWoDiacritics string
-
- // ISO 1-3 character alphabetic and/or numeric code for the administrative division of the country concerned.
- SubDiv string
-
- // 8-digit function classifier code for the location.
- Function string
-
- // Status of the entry by a 2-character code.
- Status string
-
- // Last date when the location was updated/entered.
- Date string
-
- // The IATA code for the location if different from location code in column LOCODE.
- IATA string
-
- // Geographical coordinates (latitude/longitude) of the location, if there is any.
- Coordinates string
-
- // Some general remarks regarding the UN/LOCODE in question.
- Remarks string
-}
-
-// ErrInvalidString is the error of incorrect string format of the LOCODE.
-var ErrInvalidString = errors.New("invalid string format in UN/Locode")
-
-// FromString parses string and returns LOCODE.
-//
-// If string has incorrect format, ErrInvalidString returns.
-func FromString(s string) (*LOCODE, error) {
- const (
- locationSeparator = " "
- locodePartsNumber = 2
- )
-
- words := strings.Split(s, locationSeparator)
- if ln := len(words); ln != locodePartsNumber {
- return nil, fmt.Errorf(
- "incorrect locode: it must consist of %d codes separated with a witespase, got: %d",
- locodePartsNumber,
- ln,
- )
- }
-
- l := new(LOCODE)
- copy(l[:], words)
-
- return l, nil
-}
-
-// CountryCode returns a string representation of country code.
-func (l *LOCODE) CountryCode() string {
- return l[0]
-}
-
-// LocationCode returns a string representation of location code.
-func (l *LOCODE) LocationCode() string {
- return l[1]
-}
diff --git a/pkg/util/locode/table/csv/calls.go b/pkg/util/locode/table/csv/calls.go
deleted file mode 100644
index 5f40865be3..0000000000
--- a/pkg/util/locode/table/csv/calls.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package csvlocode
-
-import (
- "encoding/csv"
- "errors"
- "io"
- "os"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
-)
-
-var errInvalidRecord = errors.New("invalid table record")
-
-// IterateAll scans a table record one-by-one, parses a UN/LOCODE record
-// from it and passes it to f.
-//
-// Returns f's errors directly.
-func (t *Table) IterateAll(f func(locode.Record) error) error {
- const wordsPerRecord = 12
-
- return t.scanWords(t.paths, wordsPerRecord, func(words []string) error {
- lc, err := locode.FromString(strings.Join(words[1:3], " "))
- if err != nil {
- return err
- }
-
- record := locode.Record{
- Ch: words[0],
- LOCODE: *lc,
- Name: words[3],
- NameWoDiacritics: words[4],
- SubDiv: words[5],
- Function: words[6],
- Status: words[7],
- Date: words[8],
- IATA: words[9],
- Coordinates: words[10],
- Remarks: words[11],
- }
-
- return f(record)
- })
-}
-
-const (
- _ = iota - 1
-
- subDivCountry
- subDivSubdivision
- subDivName
- _ // subDivLevel
-
- subDivFldNum
-)
-
-type subDivKey struct {
- countryCode,
- subDivCode string
-}
-
-type subDivRecord struct {
- name string
-}
-
-// SubDivName scans a table record to an in-memory table (once),
-// and returns the subdivision name of the country and the subdivision codes match.
-//
-// Returns locodedb.ErrSubDivNotFound if no entry matches.
-func (t *Table) SubDivName(countryCode *locodedb.CountryCode, code string) (string, error) {
- if err := t.initSubDiv(); err != nil {
- return "", err
- }
-
- rec, ok := t.mSubDiv[subDivKey{
- countryCode: countryCode.String(),
- subDivCode: code,
- }]
- if !ok {
- return "", locodedb.ErrSubDivNotFound
- }
-
- return rec.name, nil
-}
-
-func (t *Table) initSubDiv() (err error) {
- t.subDivOnce.Do(func() {
- t.mSubDiv = make(map[subDivKey]subDivRecord)
-
- err = t.scanWords([]string{t.subDivPath}, subDivFldNum, func(words []string) error {
- t.mSubDiv[subDivKey{
- countryCode: words[subDivCountry],
- subDivCode: words[subDivSubdivision],
- }] = subDivRecord{
- name: words[subDivName],
- }
-
- return nil
- })
- })
-
- return
-}
-
-var errScanInt = errors.New("interrupt scan")
-
-func (t *Table) scanWords(paths []string, fpr int, wordsHandler func([]string) error) error {
- var (
- rdrs = make([]io.Reader, 0, len(t.paths))
- closers = make([]io.Closer, 0, len(t.paths))
- )
-
- for i := range paths {
- file, err := os.OpenFile(paths[i], os.O_RDONLY, t.mode)
- if err != nil {
- return err
- }
-
- rdrs = append(rdrs, file)
- closers = append(closers, file)
- }
-
- defer func() {
- for i := range closers {
- _ = closers[i].Close()
- }
- }()
-
- r := csv.NewReader(io.MultiReader(rdrs...))
- r.ReuseRecord = true
- r.FieldsPerRecord = fpr
-
- for {
- words, err := r.Read()
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return err
- } else if len(words) != fpr {
- return errInvalidRecord
- }
-
- if err := wordsHandler(words); err != nil {
- if errors.Is(err, errScanInt) {
- break
- }
-
- return err
- }
- }
-
- return nil
-}
diff --git a/pkg/util/locode/table/csv/opts.go b/pkg/util/locode/table/csv/opts.go
deleted file mode 100644
index 5aaffd7c17..0000000000
--- a/pkg/util/locode/table/csv/opts.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package csvlocode
-
-import (
- "io/fs"
-)
-
-// Option sets an optional parameter of Table.
-type Option func(*options)
-
-type options struct {
- mode fs.FileMode
-
- extraPaths []string
-}
-
-func defaultOpts() *options {
- return &options{
- mode: 0700,
- }
-}
-
-// WithExtraPaths returns an option to add extra paths
-// to UN/LOCODE tables in csv format.
-func WithExtraPaths(ps ...string) Option {
- return func(o *options) {
- o.extraPaths = append(o.extraPaths, ps...)
- }
-}
diff --git a/pkg/util/locode/table/csv/table.go b/pkg/util/locode/table/csv/table.go
deleted file mode 100644
index b84c2b705e..0000000000
--- a/pkg/util/locode/table/csv/table.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package csvlocode
-
-import (
- "fmt"
- "io/fs"
- "sync"
-)
-
-// Prm groups the required parameters of the Table's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to UN/LOCODE csv table.
- //
- // Must not be empty.
- Path string
-
- // Path to csv table of UN/LOCODE Subdivisions.
- //
- // Must not be empty.
- SubDivPath string
-}
-
-// Table is a descriptor of the UN/LOCODE table in csv format.
-//
-// For correct operation, Table must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// The Table is immediately ready to work through API.
-type Table struct {
- paths []string
-
- mode fs.FileMode
-
- subDivPath string
-
- subDivOnce sync.Once
-
- mSubDiv map[subDivKey]subDivRecord
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Table.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Table does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Table {
- switch {
- case prm.Path == "":
- panicOnPrmValue("Path", prm.Path)
- case prm.SubDivPath == "":
- panicOnPrmValue("SubDivPath", prm.SubDivPath)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &Table{
- paths: append(o.extraPaths, prm.Path),
- mode: o.mode,
- subDivPath: prm.SubDivPath,
- }
-}
diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go
new file mode 100644
index 0000000000..413b1d9aaf
--- /dev/null
+++ b/pkg/util/logger/log.go
@@ -0,0 +1,35 @@
+package logger
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+ "go.uber.org/zap"
+)
+
+func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Debug(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Info(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Warn(msg, appendContext(ctx, fields...)...)
+}
+
+func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) {
+ l.z.Error(msg, appendContext(ctx, fields...)...)
+}
+
+func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
+ if traceID := tracing.GetTraceID(ctx); traceID != "" {
+ fields = append(fields, zap.String("trace_id", traceID))
+ }
+ if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined {
+ fields = append(fields, zap.String("io_tag", ioTag))
+ }
+ return fields
+}
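For orientation, a minimal usage sketch of these context-aware methods; the message and the "cid" field value are illustrative, not taken from the codebase:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

func main() {
	log, err := logger.NewLogger(logger.Prm{}) // console logger bound to TagMain
	if err != nil {
		panic(err)
	}
	// If ctx carries a trace ID or an IO tag, appendContext attaches
	// "trace_id" and "io_tag" fields to the record automatically.
	ctx := context.Background()
	log.Info(ctx, "processing request", zap.String("cid", "example"))
}
```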
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index 4a536368ac..a1998cb1a5 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -1,6 +1,11 @@
package logger
import (
+ "fmt"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/zapjournald"
+ "github.com/ssgreg/journald"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
@@ -8,8 +13,10 @@ import (
// Logger represents a component
// for writing messages to log.
type Logger struct {
- *zap.Logger
- lvl zap.AtomicLevel
+ z *zap.Logger
+ c zapcore.Core
+ t Tag
+ w bool
}
// Prm groups Logger's parameters.
@@ -18,22 +25,33 @@ type Logger struct {
// Parameters that have been connected to the Logger support its
// configuration changing.
//
-// Passing Prm after a successful connection via the NewLogger, connects
-// the Prm to a new instance of the Logger.
-//
-// See also Reload, SetLevelString.
+// See also Logger.Reload, SetLevelString.
type Prm struct {
- // link to the created Logger
- // instance; used for a runtime
- // reconfiguration
- _log *Logger
-
// support runtime rereading
level zapcore.Level
+ // SamplingHook is a sampling hook for the zap.Logger.
+ SamplingHook func(e zapcore.Entry, sd zapcore.SamplingDecision)
+
// do not support runtime rereading
+ dest string
+
+ // PrependTimestamp specifies whether to prepend a timestamp to each log record
+ PrependTimestamp bool
+
+ // Options for zap.Logger
+ Options []zap.Option
+
+ // map of tags to the log level for that tag; overrides level
+ tl map[Tag]zapcore.Level
}
+const (
+ DestinationUndefined = ""
+ DestinationStdout = "stdout"
+ DestinationJournald = "journald"
+)
+
// SetLevelString sets the minimum logging level. Default is
// "info".
//
@@ -45,20 +63,20 @@ func (p *Prm) SetLevelString(s string) error {
return p.level.UnmarshalText([]byte(s))
}
-// Reload reloads configuration of a connected instance of the Logger.
-// Returns ErrLoggerNotConnected if no connection has been performed.
-// Returns any reconfiguration error from the Logger directly.
-func (p Prm) Reload() error {
- if p._log == nil {
- // incorrect logger usage
- panic("parameters are not connected to any Logger")
+func (p *Prm) SetDestination(d string) error {
+ if d != DestinationStdout && d != DestinationJournald {
+ return fmt.Errorf("invalid logger destination %s", d)
}
-
- return p._log.reload(p)
+ if p != nil {
+ p.dest = d
+ }
+ return nil
}
-func defaultPrm() *Prm {
- return new(Prm)
+// SetTags parses list of tags with log level.
+func (p *Prm) SetTags(tags [][]string) (err error) {
+ p.tl, err = parseTags(tags)
+ return err
}
// NewLogger constructs a new zap logger instance. Constructing with nil
@@ -72,32 +90,154 @@ func defaultPrm() *Prm {
// - ISO8601 time encoding.
//
// Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm *Prm) (*Logger, error) {
- if prm == nil {
- prm = defaultPrm()
+func NewLogger(prm Prm) (*Logger, error) {
+ switch prm.dest {
+ case DestinationUndefined, DestinationStdout:
+ return newConsoleLogger(prm)
+ case DestinationJournald:
+ return newJournaldLogger(prm)
+ default:
+ return nil, fmt.Errorf("unknown destination %s", prm.dest)
+ }
+}
+
+func newConsoleLogger(prm Prm) (*Logger, error) {
+ c := zap.NewProductionConfig()
+ c.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+ c.Encoding = "console"
+ if prm.SamplingHook != nil {
+ c.Sampling.Hook = prm.SamplingHook
}
- lvl := zap.NewAtomicLevelAt(prm.level)
+ if prm.PrependTimestamp {
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ } else {
+ c.EncoderConfig.TimeKey = ""
+ }
- c := zap.NewProductionConfig()
- c.Level = lvl
- c.Encoding = "console"
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
-
- lZap, err := c.Build(
+ opts := []zap.Option{
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- )
+ zap.AddCallerSkip(1),
+ }
+ opts = append(opts, prm.Options...)
+ lZap, err := c.Build(opts...)
if err != nil {
return nil, err
}
-
- l := &Logger{Logger: lZap, lvl: lvl}
- prm._log = l
+ l := &Logger{z: lZap, c: lZap.Core()}
+ l = l.WithTag(TagMain)
return l, nil
}
-func (l *Logger) reload(prm Prm) error {
- l.lvl.SetLevel(prm.level)
- return nil
+func newJournaldLogger(prm Prm) (*Logger, error) {
+ c := zap.NewProductionConfig()
+ if prm.SamplingHook != nil {
+ c.Sampling.Hook = prm.SamplingHook
+ }
+
+ if prm.PrependTimestamp {
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ } else {
+ c.EncoderConfig.TimeKey = ""
+ }
+
+ encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
+
+ core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields)
+ coreWithContext := core.With([]zapcore.Field{
+ zapjournald.SyslogFacility(zapjournald.LogDaemon),
+ zapjournald.SyslogIdentifier(),
+ zapjournald.SyslogPid(),
+ })
+
+ var samplerOpts []zapcore.SamplerOption
+ if c.Sampling.Hook != nil {
+ samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook))
+ }
+ samplingCore := zapcore.NewSamplerWithOptions(
+ coreWithContext,
+ time.Second,
+ c.Sampling.Initial,
+ c.Sampling.Thereafter,
+ samplerOpts...,
+ )
+ opts := []zap.Option{
+ zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
+ zap.AddCallerSkip(1),
+ }
+ opts = append(opts, prm.Options...)
+ lZap := zap.New(samplingCore, opts...)
+ l := &Logger{z: lZap, c: lZap.Core()}
+ l = l.WithTag(TagMain)
+
+ return l, nil
+}
+
+// With creates a child logger with additional fields; the parent is not affected.
+// Panics if the tag is unset.
+func (l *Logger) With(fields ...zap.Field) *Logger {
+ if l.t == 0 {
+ panic("tag is unset")
+ }
+ c := *l
+ c.z = l.z.With(fields...)
+ // mark that With has been called on this logger
+ c.w = true
+ return &c
+}
+
+type core struct {
+ c zapcore.Core
+ l zap.AtomicLevel
+}
+
+func (c *core) Enabled(lvl zapcore.Level) bool {
+ return c.l.Enabled(lvl)
+}
+
+func (c *core) With(fields []zapcore.Field) zapcore.Core {
+ clone := *c
+ clone.c = clone.c.With(fields)
+ return &clone
+}
+
+func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ return c.c.Check(e, ce)
+}
+
+func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error {
+ return c.c.Write(e, fields)
+}
+
+func (c *core) Sync() error {
+ return c.c.Sync()
+}
+
+// WithTag returns a logger equivalent to one constructed via [NewLogger] with the same parameters but bound to the given tag.
+// Panics if an unsupported tag is provided.
+func (l *Logger) WithTag(tag Tag) *Logger {
+ if tag == 0 || tag > Tag(len(_Tag_index)-1) {
+ panic("unsupported tag " + tag.String())
+ }
+ if l.w {
+ panic("unsupported operation for the logger's state")
+ }
+ c := *l
+ c.t = tag
+ c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
+ return &core{
+ c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
+ l: tagToLogLevel[tag],
+ }
+ }))
+ return &c
+}
+
+func NewLoggerWrapper(z *zap.Logger) *Logger {
+ return &Logger{
+ z: z.WithOptions(zap.AddCallerSkip(1)),
+ t: TagMain,
+ c: z.Core(),
+ }
}
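The With/WithTag ordering above is easy to get wrong, so here is a minimal sketch of the intended call order, assuming the Tag constants from tags.go below; the field names and values are illustrative:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

func main() {
	log, _ := logger.NewLogger(logger.Prm{}) // bound to TagMain
	shardLog := log.WithTag(logger.TagShard) // per-subsystem level applies
	shardLog = shardLog.With(zap.String("shard_id", "example"))
	shardLog.Info(context.Background(), "shard message")

	// Reversing the order panics by design: once With has attached
	// fields, WithTag refuses to rebind the logger.
	// log.With(zap.String("k", "v")).WithTag(logger.TagShard) // panics
}
```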
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
new file mode 100644
index 0000000000..b867ee6cc2
--- /dev/null
+++ b/pkg/util/logger/logger_test.go
@@ -0,0 +1,118 @@
+package logger
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest/observer"
+)
+
+func BenchmarkLogger(b *testing.B) {
+ ctx := context.Background()
+ m := map[string]Prm{}
+
+ prm := Prm{}
+ require.NoError(b, prm.SetLevelString("debug"))
+ m["logging enabled"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("error"))
+ m["logging disabled"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("error"))
+ require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
+ m["logging enabled via tags"] = prm
+
+ prm = Prm{}
+ require.NoError(b, prm.SetLevelString("debug"))
+ require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
+ m["logging disabled via tags"] = prm
+
+ for k, v := range m {
+ b.Run(k, func(b *testing.B) {
+ logger, err := createLogger(v)
+ require.NoError(b, err)
+ UpdateLevelForTags(v)
+ b.ResetTimer()
+ b.ReportAllocs()
+ for range b.N {
+ logger.Info(ctx, "test info")
+ }
+ })
+ }
+}
+
+type testCore struct {
+ core zapcore.Core
+}
+
+func (c *testCore) Enabled(lvl zapcore.Level) bool {
+ return c.core.Enabled(lvl)
+}
+
+func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
+ c.core = c.core.With(fields)
+ return c
+}
+
+func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+ return ce.AddCore(e, c)
+}
+
+func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
+ return nil
+}
+
+func (c *testCore) Sync() error {
+ return c.core.Sync()
+}
+
+func createLogger(prm Prm) (*Logger, error) {
+ prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+ tc := testCore{core: core}
+ return &tc
+ })}
+ return NewLogger(prm)
+}
+
+func TestLoggerOutput(t *testing.T) {
+ obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
+
+ prm := Prm{}
+ require.NoError(t, prm.SetLevelString("debug"))
+ prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core {
+ return obs
+ })}
+ loggerMain, err := NewLogger(prm)
+ require.NoError(t, err)
+ UpdateLevelForTags(prm)
+
+ loggerMainWith := loggerMain.With(zap.String("key", "value"))
+
+ require.Panics(t, func() {
+ loggerMainWith.WithTag(TagShard)
+ })
+ loggerShard := loggerMain.WithTag(TagShard)
+ loggerShard = loggerShard.With(zap.String("key1", "value1"))
+
+ loggerMorph := loggerMain.WithTag(TagMorph)
+ loggerMorph = loggerMorph.With(zap.String("key2", "value2"))
+
+ ctx := context.Background()
+ loggerMain.Debug(ctx, "main")
+ loggerMainWith.Debug(ctx, "main with")
+ loggerShard.Debug(ctx, "shard")
+ loggerMorph.Debug(ctx, "morph")
+
+ require.Len(t, logs.All(), 4)
+ require.Len(t, logs.FilterFieldKey("key").All(), 1)
+ require.Len(t, logs.FilterFieldKey("key1").All(), 1)
+ require.Len(t, logs.FilterFieldKey("key2").All(), 1)
+ require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2)
+ require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1)
+ require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1)
+}
diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result
new file mode 100644
index 0000000000..612fa29678
--- /dev/null
+++ b/pkg/util/logger/logger_test.result
@@ -0,0 +1,46 @@
+goos: linux
+goarch: amd64
+pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger
+cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
+BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op
+PASS
+ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s
diff --git a/pkg/util/logger/metrics.go b/pkg/util/logger/metrics.go
new file mode 100644
index 0000000000..7e62e6383e
--- /dev/null
+++ b/pkg/util/logger/metrics.go
@@ -0,0 +1,48 @@
+package logger
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ logSubsystem = "logger"
+ logLevelLabel = "level"
+ logDroppedLabel = "dropped"
+)
+
+type LogMetrics interface {
+ Inc(level zapcore.Level, dropped bool)
+ GetSamplingHook() func(e zapcore.Entry, sd zapcore.SamplingDecision)
+}
+
+type logMetrics struct {
+ logCount *prometheus.CounterVec
+}
+
+func NewLogMetrics(namespace string) LogMetrics {
+ return &logMetrics{
+ logCount: metrics.NewCounterVec(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: logSubsystem,
+ Name: "entry_count",
+ Help: "Total log entries emitted or dropped by severity level",
+ }, []string{logLevelLabel, logDroppedLabel}),
+ }
+}
+
+func (m *logMetrics) Inc(level zapcore.Level, dropped bool) {
+ m.logCount.With(prometheus.Labels{
+ logLevelLabel: level.String(),
+ logDroppedLabel: strconv.FormatBool(dropped),
+ }).Inc()
+}
+
+func (m *logMetrics) GetSamplingHook() func(zapcore.Entry, zapcore.SamplingDecision) {
+ return func(e zapcore.Entry, sd zapcore.SamplingDecision) {
+ m.Inc(e.Level, sd == zapcore.LogDropped)
+ }
+}
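A sketch of wiring these metrics into the logger's sampler via the SamplingHook parameter shown earlier; the namespace string is illustrative:

```go
package main

import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"

func main() {
	m := logger.NewLogMetrics("frostfs") // namespace is illustrative
	log, err := logger.NewLogger(logger.Prm{
		SamplingHook: m.GetSamplingHook(), // counts emitted vs dropped entries
	})
	if err != nil {
		panic(err)
	}
	_ = log
}
```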
diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go
new file mode 100644
index 0000000000..1b98f2e62c
--- /dev/null
+++ b/pkg/util/logger/tag_string.go
@@ -0,0 +1,43 @@
+// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT.
+
+package logger
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[TagMain-1]
+ _ = x[TagMorph-2]
+ _ = x[TagGrpcSvc-3]
+ _ = x[TagIr-4]
+ _ = x[TagProcessor-5]
+ _ = x[TagEngine-6]
+ _ = x[TagBlobovnicza-7]
+ _ = x[TagBlobovniczaTree-8]
+ _ = x[TagBlobstor-9]
+ _ = x[TagFSTree-10]
+ _ = x[TagGC-11]
+ _ = x[TagShard-12]
+ _ = x[TagWriteCache-13]
+ _ = x[TagDeleteSvc-14]
+ _ = x[TagGetSvc-15]
+ _ = x[TagSearchSvc-16]
+ _ = x[TagSessionSvc-17]
+ _ = x[TagTreeSvc-18]
+ _ = x[TagPolicer-19]
+ _ = x[TagReplicator-20]
+}
+
+const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator"
+
+var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148}
+
+func (i Tag) String() string {
+ i -= 1
+ if i >= Tag(len(_Tag_index)-1) {
+ return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _Tag_name[_Tag_index[i]:_Tag_index[i+1]]
+}
diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go
new file mode 100644
index 0000000000..a5386707e1
--- /dev/null
+++ b/pkg/util/logger/tags.go
@@ -0,0 +1,94 @@
+package logger
+
+import (
+ "fmt"
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+//go:generate stringer -type Tag -linecomment
+
+type Tag uint8
+
+const (
+ _ Tag = iota //
+ TagMain // main
+ TagMorph // morph
+ TagGrpcSvc // grpcsvc
+ TagIr // ir
+ TagProcessor // processor
+ TagEngine // engine
+ TagBlobovnicza // blobovnicza
+ TagBlobovniczaTree // blobovniczatree
+ TagBlobstor // blobstor
+ TagFSTree // fstree
+ TagGC // gc
+ TagShard // shard
+ TagWriteCache // writecache
+ TagDeleteSvc // deletesvc
+ TagGetSvc // getsvc
+ TagSearchSvc // searchsvc
+ TagSessionSvc // sessionsvc
+ TagTreeSvc // treesvc
+ TagPolicer // policer
+ TagReplicator // replicator
+
+ defaultLevel = zapcore.InfoLevel
+)
+
+var (
+ tagToLogLevel = map[Tag]zap.AtomicLevel{}
+ stringToTag = map[string]Tag{}
+)
+
+func init() {
+ for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
+ tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
+ stringToTag[i.String()] = i
+ }
+}
+
+// parseTags returns:
+// - a map (always instantiated on success) from tag to the custom log level for that tag;
+// - an error, if one occurred (the map is nil in that case).
+func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
+ m := make(map[Tag]zapcore.Level)
+ if len(raw) == 0 {
+ return m, nil
+ }
+ for _, item := range raw {
+ str, level := item[0], item[1]
+ if len(level) == 0 {
+ // Tags without a level do not need to be parsed:
+ // the default log level will be used for them.
+ continue
+ }
+ var l zapcore.Level
+ err := l.UnmarshalText([]byte(level))
+ if err != nil {
+ return nil, err
+ }
+ tmp := strings.Split(str, ",")
+ for _, tagStr := range tmp {
+ tag, ok := stringToTag[strings.TrimSpace(tagStr)]
+ if !ok {
+ return nil, fmt.Errorf("unsupported tag %s", tagStr)
+ }
+ m[tag] = l
+ }
+ }
+ return m, nil
+}
+
+func UpdateLevelForTags(prm Prm) {
+ for k, v := range tagToLogLevel {
+ nk, ok := prm.tl[k]
+ if ok {
+ v.SetLevel(nk)
+ } else {
+ v.SetLevel(prm.level)
+ }
+ }
+}
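A sketch of per-tag level configuration using the API above; tag names follow the stringer values ("writecache", "gc", ...), and the chosen levels are illustrative:

```go
package main

import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"

func main() {
	prm := logger.Prm{}
	if err := prm.SetLevelString("error"); err != nil { // default for all tags
		panic(err)
	}
	// Raise only the write-cache and GC subsystems to debug;
	// every other tag stays at error.
	if err := prm.SetTags([][]string{{"writecache,gc", "debug"}}); err != nil {
		panic(err)
	}
	log, err := logger.NewLogger(prm)
	if err != nil {
		panic(err)
	}
	logger.UpdateLevelForTags(prm) // pushes levels into the shared atomic levels
	_ = log
}
```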
diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go
index b2f0f3eb72..b5b0a31eb2 100644
--- a/pkg/util/logger/test/logger.go
+++ b/pkg/util/logger/test/logger.go
@@ -1,36 +1,20 @@
package test
import (
+ "testing"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zaptest"
)
-const sampling = 1000
-
// NewLogger creates a new logger.
-//
-// If debug, development logger is created.
-func NewLogger(debug bool) *logger.Logger {
- var l logger.Logger
- l.Logger = zap.L()
-
- if debug {
- cfg := zap.NewDevelopmentConfig()
- cfg.Sampling = &zap.SamplingConfig{
- Initial: sampling,
- Thereafter: sampling,
- }
-
- cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
-
- log, err := cfg.Build()
- if err != nil {
- panic("could not prepare logger: " + err.Error())
- }
-
- l.Logger = log
- }
-
- return &l
+func NewLogger(t testing.TB) *logger.Logger {
+ return logger.NewLoggerWrapper(
+ zaptest.NewLogger(t,
+ zaptest.Level(zapcore.DebugLevel),
+ zaptest.WrapOptions(zap.Development(), zap.AddCaller()),
+ ),
+ )
}
diff --git a/pkg/util/os.go b/pkg/util/os.go
index 1c4c97806c..30e08a8c36 100644
--- a/pkg/util/os.go
+++ b/pkg/util/os.go
@@ -6,5 +6,5 @@ import "os"
// but with +x for a user and a group. This makes the created
// dir openable regardless of the passed permissions.
func MkdirAllX(path string, perm os.FileMode) error {
- return os.MkdirAll(path, perm|0110)
+ return os.MkdirAll(path, perm|0o110)
}
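A sketch showing the effect of the mask; the path is illustrative:

```go
package main

import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"

func main() {
	// With perm = 0o600, directories are created with 0o710: the owner
	// and group gain +x, so the tree stays traversable even though the
	// caller passed no execute bits.
	if err := util.MkdirAllX("/tmp/example/nested", 0o600); err != nil {
		panic(err)
	}
}
```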
diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go
index 97508f82af..a06296a07f 100644
--- a/pkg/util/rand/rand.go
+++ b/pkg/util/rand/rand.go
@@ -13,7 +13,7 @@ func Uint64() uint64 {
return source.Uint64()
}
-// Uint64 returns a random uint32 value.
+// Uint32 returns a random uint32 value.
func Uint32() uint32 {
return source.Uint32()
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
new file mode 100644
index 0000000000..bd15d0e8f4
--- /dev/null
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -0,0 +1,94 @@
+package sdnotify
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ ReadyEnabled = "READY=1"
+ StoppingEnabled = "STOPPING=1"
+ ReloadingEnabled = "RELOADING=1"
+)
+
+var (
+ socket *net.UnixAddr
+
+ errSocketVariableIsNotPresent = errors.New("\"NOTIFY_SOCKET\" environment variable is not present")
+ errSocketIsNotInitialized = errors.New("socket is not initialized")
+)
+
+// InitSocket initializes the socket from the NOTIFY_SOCKET
+// environment variable.
+func InitSocket() error {
+ notifySocket := os.Getenv("NOTIFY_SOCKET")
+ if notifySocket == "" {
+ return errSocketVariableIsNotPresent
+ }
+ socket = &net.UnixAddr{
+ Name: notifySocket,
+ Net: "unixgram",
+ }
+ return nil
+}
+
+// FlagAndStatus sends systemd a combination of a well-known
+// flag and a derived STATUS= line, separated by a newline.
+func FlagAndStatus(status string) error {
+ if status == ReloadingEnabled {
+ // From https://www.man7.org/linux/man-pages/man5/systemd.service.5.html
+ //
+ // When initiating the reload process the service is
+ // expected to reply with a notification message via
+ // sd_notify(3) that contains the "RELOADING=1" field in
+ // combination with "MONOTONIC_USEC=" set to the current
+ // monotonic time (i.e. CLOCK_MONOTONIC in
+ // clock_gettime(2)) in μs, formatted as decimal string.
+ // Once reloading is complete another notification message
+ // must be sent, containing "READY=1".
+ //
+ // For MONOTONIC_USEC format refer to https://www.man7.org/linux/man-pages/man3/sd_notify.3.html
+ var ts unix.Timespec
+ if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
+ return fmt.Errorf("clock_gettime: %w", err)
+ }
+ status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10)
+ status += "\nSTATUS=RELOADING"
+ return Send(status)
+ }
+ status += "\nSTATUS=" + strings.TrimSuffix(status, "=1")
+ return Send(status)
+}
+
+// Status sends the systemd notification STATUS=<status>.
+func Status(status string) error {
+ return Send("STATUS=" + status)
+}
+
+// ClearStatus resets the current service status previously set by Status.
+func ClearStatus() error {
+ return Status("")
+}
+
+// Send sends the state through the notify socket, if one is present.
+// If the notify socket was not detected, an error is returned.
+func Send(state string) error {
+ if socket == nil {
+ return errSocketIsNotInitialized
+ }
+ conn, err := net.DialUnix(socket.Net, nil, socket)
+ if err != nil {
+ return fmt.Errorf("can't open unix socket: %v", err)
+ }
+ defer conn.Close()
+ if _, err = conn.Write([]byte(state)); err != nil {
+ return fmt.Errorf("can't write into the unix socket: %v", err)
+ }
+ return nil
+}
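A sketch of the expected notification sequence for a Type=notify systemd unit; error handling is abbreviated:

```go
package main

import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"

func main() {
	if err := sdnotify.InitSocket(); err != nil {
		// Not started under systemd with a notify socket; run without notifications.
		return
	}
	// Signal readiness: sends "READY=1\nSTATUS=READY".
	_ = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled)

	// ... serve ...

	// Signal shutdown: sends "STOPPING=1\nSTATUS=STOPPING".
	_ = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled)
}
```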
diff --git a/pkg/util/state/storage.go b/pkg/util/state/storage.go
index 0485b14813..ee957f270e 100644
--- a/pkg/util/state/storage.go
+++ b/pkg/util/state/storage.go
@@ -19,7 +19,7 @@ var stateBucket = []byte("state")
// NewPersistentStorage creates a new instance of a storage with 0600 rights.
func NewPersistentStorage(path string) (*PersistentStorage, error) {
- db, err := bbolt.Open(path, 0600, nil)
+ db, err := bbolt.Open(path, 0o600, nil)
if err != nil {
return nil, fmt.Errorf("can't open bbolt at %s: %w", path, err)
}
diff --git a/pkg/util/sync/key_locker.go b/pkg/util/sync/key_locker.go
new file mode 100644
index 0000000000..97de0386d4
--- /dev/null
+++ b/pkg/util/sync/key_locker.go
@@ -0,0 +1,56 @@
+package sync
+
+import "sync"
+
+type locker struct {
+ mtx sync.Mutex
+ waiters int // not protected by mtx; use the outer lockersMtx to update concurrently
+}
+
+type KeyLocker[K comparable] struct {
+ lockers map[K]*locker
+ lockersMtx sync.Mutex
+}
+
+func NewKeyLocker[K comparable]() *KeyLocker[K] {
+ return &KeyLocker[K]{
+ lockers: make(map[K]*locker),
+ }
+}
+
+func (l *KeyLocker[K]) Lock(key K) {
+ l.lockersMtx.Lock()
+
+ if locker, found := l.lockers[key]; found {
+ locker.waiters++
+ l.lockersMtx.Unlock()
+
+ locker.mtx.Lock()
+ return
+ }
+
+ locker := &locker{
+ waiters: 1,
+ }
+ locker.mtx.Lock()
+
+ l.lockers[key] = locker
+ l.lockersMtx.Unlock()
+}
+
+func (l *KeyLocker[K]) Unlock(key K) {
+ l.lockersMtx.Lock()
+ defer l.lockersMtx.Unlock()
+
+ locker, found := l.lockers[key]
+ if !found {
+ return
+ }
+
+ if locker.waiters == 1 {
+ delete(l.lockers, key)
+ }
+ locker.waiters--
+
+ locker.mtx.Unlock()
+}
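A sketch of guarding per-key critical sections with KeyLocker; the import alias avoids clashing with the standard library sync package, and the key value is illustrative:

```go
package main

import (
	utilsync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
)

func main() {
	locker := utilsync.NewKeyLocker[string]()

	locker.Lock("object-1") // blocks other goroutines locking "object-1" only
	defer locker.Unlock("object-1")
	// ... critical section for "object-1"; other keys proceed concurrently ...
}
```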
diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go
new file mode 100644
index 0000000000..259064ecf8
--- /dev/null
+++ b/pkg/util/sync/key_locker_test.go
@@ -0,0 +1,32 @@
+package sync
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestKeyLocker(t *testing.T) {
+ taken := false
+ eg, _ := errgroup.WithContext(context.Background())
+ keyLocker := NewKeyLocker[int]()
+ for range 100 {
+ eg.Go(func() error {
+ keyLocker.Lock(0)
+ defer keyLocker.Unlock(0)
+
+ require.False(t, taken)
+ taken = true
+ require.True(t, taken)
+ time.Sleep(10 * time.Millisecond)
+ taken = false
+ require.False(t, taken)
+
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+}
diff --git a/pkg/util/test/keys.go b/pkg/util/test/keys.go
deleted file mode 100644
index 3b87bfb3f0..0000000000
--- a/pkg/util/test/keys.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package test
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/x509"
- "encoding/hex"
- "strconv"
-)
-
-// Keys is a list of test private keys in hex format.
-var Keys = []string{
- "307702010104203ee1fd84dd7199925f8d32f897aaa7f2d6484aa3738e5e0abd03f8240d7c6d8ca00a06082a8648ce3d030107a1440342000475099c302b77664a2508bec1cae47903857b762c62713f190e8d99912ef76737f36191e4c0ea50e47b0e0edbae24fd6529df84f9bd63f87219df3a086efe9195",
- "3077020101042035f2b425109b17b1d8f3b5c50daea1091e27d2452bce1126080bd4b98de9bb67a00a06082a8648ce3d030107a144034200045188d33a3113ac77fea0c17137e434d704283c234400b9b70bcdf4829094374abb5818767e460a94f36046ffcef44576fa59ef0e5f31fb86351c06c3d84e156c",
- "30770201010420f20cd67ed4ea58307945f5e89a5e016b463fbcad610ee9a7b5e0094a780c63afa00a06082a8648ce3d030107a14403420004c4c574d1bbe7efb2feaeed99e6c03924d6d3c9ad76530437d75c07bff3ddcc0f3f7ef209b4c5156b7395dfa4479dd6aca00d8b0419c2d0ff34de73fad4515694",
- "30770201010420335cd4300acc9594cc9a0b8c5b3b3148b29061d019daac1b97d0fbc884f0281ea00a06082a8648ce3d030107a14403420004563eece0b9035e679d28e2d548072773c43ce44a53cb7f30d3597052210dbb70674d8eefac71ca17b3dc6499c9167e833b2c079b2abfe87a5564c2014c6132ca",
- "30770201010420063a502c7127688e152ce705f626ca75bf0b62c5106018460f1b2a0d86567546a00a06082a8648ce3d030107a14403420004f8152966ad33b3c2622bdd032f5989fbd63a9a3af34e12eefee912c37defc8801ef16cc2c16120b3359b7426a7609af8f4185a05dcd42e115ae0df0758bc4b4c",
- "30770201010420714c3ae55534a1d065ea1213f40a3b276ec50c75eb37ee5934780e1a48027fa2a00a06082a8648ce3d030107a1440342000452d9fd2376f6b3bcb4706cad54ec031d95a1a70414129286c247cd2bc521f73fa8874a6a6466b9d111631645d891e3692688d19c052c244e592a742173ea8984",
- "30770201010420324b97d5f2c68e402b6723c600c3a7350559cc90018f9bfce0deed3d57890916a00a06082a8648ce3d030107a1440342000451ec65b2496b1d8ece3efe68a8b57ce7bc75b4171f07fa5b26c63a27fb4f92169c1b15150a8bace13f322b554127eca12155130c0b729872935fd714df05df5e",
- "3077020101042086ebcc716545e69a52a7f9a41404583e17984a20d96fafe9a98de0ac420a2f88a00a06082a8648ce3d030107a144034200045f7d63e18e6b896730f45989b7a8d00c0b86c75c2b834d903bc681833592bdcc25cf189e6ddef7b22217fd442b9825f17a985e7e2020b20188486dd53be9073e",
- "3077020101042021a5b7932133e23d4ebb7a39713defd99fc94edfc909cf24722754c9077f0d61a00a06082a8648ce3d030107a14403420004d351a4c87ec3b33e62610cb3fd197962c0081bbe1b1b888bc41844f4c6df9cd3fd4637a6f35aa3d4531fecc156b1707504f37f9ef154beebc622afc29ab3f896",
- "3077020101042081ef410f78e459fa110908048fc8923fe1e84d7ce75f78f32b8c114c572bfb87a00a06082a8648ce3d030107a144034200046e3859e6ab43c0f45b7891761f0da86a7b62f931f3d963efd3103924920a73b32ce5bc8f14d8fb31e63ccd336b0016eeb951323c915339ca6c4c1ebc01bbeb2b",
- "307702010104209dd827fa67faf3912e981b8dbccafb6ded908957ba67cf4c5f37c07d33abb6c5a00a06082a8648ce3d030107a14403420004e5cb5ae6a1bd3861a6b233c9e13fa0183319f601d0f4e99b27461e28f473e822de395d15c1e14d29a6bd4b597547e8c5d09a7dd3a722a739bb76936c1ad43c0e",
- "3077020101042005a03e332e1aff5273c52c38ec6c5a1593170ddf8d13989a8a160d894566fc6ba00a06082a8648ce3d030107a144034200045a11611542f07f2d5666de502994ef61f069674513811df42290254c26f71134100fed43ea8ecd9833be9abb42d95be8661f790c15b41ca20db5b4df4f664fb4",
- "307702010104206e833f66daf44696cafc63297ff88e16ba13feefa5b6ab3b92a771ff593e96d0a00a06082a8648ce3d030107a14403420004434e0e3ec85c1edaf614f91b7e3203ab4d8e7e1c8a2042223f882fc04da7b1f77f8f2ee3b290ecfa6470a1c416a22b368d05578beb25ec31bcf60aff2e3ffcd4",
- "30770201010420937c4796b9fc62fde4521c18289f0e610cf9b5ebf976be8d292bc8306cee2011a00a06082a8648ce3d030107a14403420004ba5951adddf8eb9bc5dac2c03a33584d321f902353c0aadccd3158256b294f5aa9cd5215201d74de2906630d8cefb4f298ff89caa29b5c90f9d15294f8d785bc",
- "307702010104204b002204533f9b2fb035087df7f4288e496fc84e09299765de7a6cd61e6a32bca00a06082a8648ce3d030107a1440342000441abcf37a4d0962156c549de8497120b87e5e370a967188ab1d2d7abce53711dfd692a37f30018e2d14030185b16a8e0b9ca61dca82bfe6d8fc55c836355b770",
- "3077020101042093ffa35f1977b170a0343986537de367f59ea5a8bd4a8fdd01c5d9700a7282dba00a06082a8648ce3d030107a144034200040e01090b297cf536740b5c0abb15afba03139b0d4b647fdd0c01d457936499c19283cf7b1aee2899923e879c97ddeffe4a1fa2bffc59d331b55982972524b45b",
- "307702010104201c1a2209a2b6f445fb63b9c6469d3edc01c99bab10957f0cbe5fad2b1c548975a00a06082a8648ce3d030107a144034200040c8fd2da7bad95b6b3782c0a742476ffcb35e5bc539ea19bbccb5ed05265da3ab51ec39afd01fbee800e05ec0eb94b68854cd9c3de6ab028d011c53085ffc1b3",
- "30770201010420b524d8cba99619f1f9559e2fe38b2c6d84a484d38574a92e56977f79eac8b537a00a06082a8648ce3d030107a14403420004a6d7d0db0cc0a46860fb912a7ace42c801d8d693e2678f07c3f5b9ea3cb0311169cbd96b0b9fc78f81e73d2d432b2c224d8d84380125ecc126481ee322335740",
- "307702010104207681725fec424a0c75985acfb7be7baed18b43ec7a18c0b47aa757849444557ca00a06082a8648ce3d030107a14403420004bd4453efc74d7dedf442b6fc249848c461a0c636bb6a85c86a194add1f8a5fac9bf0c04ece3f233c5aba2dee0d8a2a11b6a297edae60c0bc0536454ce0b5f9dd",
- "30770201010420ae43929b14666baa934684c20a03358cda860b89208824fac56b48f80920edc4a00a06082a8648ce3d030107a14403420004d706b0d86743d6052375aa5aa1a3613c87dccfe704dc85b4ed4f49a84a248a94582202927ec0c082234919f3ce6617152ba0d02497b81c61284261ce86cef905",
- "3077020101042089d600f43c47ab98e00225e9b2d4a6c7ab771490f856d4679d9e1e0cca3009d0a00a06082a8648ce3d030107a144034200048515055045543e429173fc8f9f56a070bd4314b2b3005437d8504e6b6885f85101409b933e27c0de11415aee516d0d1b474088a437ece496ceb4f1c131e9ea40",
- "3077020101042015518dcf888c7b241dac1c8bfa19d99f7fdba7ba37ed57d69bbbd95bb376ea4ca00a06082a8648ce3d030107a1440342000459e88d92efaa5277d60948feaa0bcd14388da00e35f9bae8282985441788f8beb2b84b71b1ae8aa24d64bb83759b80e3f05c07a791ffe10079c0e1694d74618c",
- "307702010104203e840868a96e59ca10f048202cce02e51655a932ff0ac98a7b5589a8df17f580a00a06082a8648ce3d030107a14403420004f296414e914dcefd29bc8a493f8aedc683e5514a8ec5160637bee40ebaa85a421a363c8f7ce3ed113e97d2c4b6d9cd31d21698a54fce8d8e280a6be9ee4fbca9",
- "30770201010420aa746067891cf005286d56d53092f77961f828bf5bf11aade18c8a458090d39aa00a06082a8648ce3d030107a144034200044af5ad2dacbb32ab795ab734d26bae6c098bd2ba9ca607542174d61b49ca3c07786aeb0c96908793a63d4f20cd370a77b7ec65e6b285c6337764e7ae3cd5fa1c",
- "307702010104207135cbd831d52e778622c21ed035df9e3c6e4128de38fbf4d165a0583b5b4a29a00a06082a8648ce3d030107a1440342000412e2b9e11f288d8db60fbb00456f5969e2816a214a295d8e4d38fbacab6b0a7e0cdb8557e53d408244083f192d8a604d5b764ab44b467e34664ca82e012b60ab",
- "3077020101042064b839ca26c42e2e97e94da5589db2de18597a12d6167fdfe0d20e932de747a2a00a06082a8648ce3d030107a1440342000481e90c2173b720447ae28361149598a7245ed51c3881a89353da25b8e574b8c9b2d80b2563efe5d9a0184b57af2431116c8a4ad8071ef2764ca3d3744c638401",
- "30770201010420a56df8e6349520d27c36eb1e9675720c702d562842c859cd54b3d866f2cada30a00a06082a8648ce3d030107a14403420004dc08beb5b857f6da13ae1116e40a6e4e4b5aaebc8040eae0b3037c243b1c24def39de670380472df7aa98cb9e0f1132bc4afc0629d80a24c54b8ad600cb24cd4",
- "30770201010420bd2dd18485a9667673b2c38c2ad51cc756a199d18fe1100acf29b647a549171ea00a06082a8648ce3d030107a1440342000422825ffe8b3416b6755a7076a7dc6f746ff29ee0a4455dceb0f3262127d51c9bb53f2c204636da8d7a09961274d7c7ba2ef3c771e83fb996ffe3f9882c530ffd",
- "307702010104203058a0c8de5c6d4a5c7f64883e7d3c9f5097c8bc073cc482421e903b37123c06a00a06082a8648ce3d030107a14403420004f959705673c2f4112673e43d1d876ca71c64153abb6c9f58d1c3b3c1f8c213ee346833fb695eb533664d596a68e42150a21b405e3a08ed70af5f568275a7a79f",
- "307702010104202bd9035bf38e7c4580abc377a6e9c31aa9bdaff90af2ce688eda9a532c83875ea00a06082a8648ce3d030107a14403420004918010ea3387786c6a257996ec74d7ee4e1703b3b811118f4e89fabfef7c694495191848a0d590313a0be9784644ef98e0f0f7e50fed5bee3fa48d66edbcd2b5",
- "30770201010420aa055d6cbe96e1cfbe39530bc4b7a976baff53ce399956f0d8241750d3379990a00a06082a8648ce3d030107a1440342000444e8b6deda76c12320a8c5b7a48141ebf5dc9288df79a0f418ab92d82061d10118b8bce9fb200e5009a19fb0e19036762b3ef85440405f43225d6ee3350bf96c",
- "30770201010420b8712525a79c7bd3df2a9dbabde1a111078a7ef30687a2efe0f0c4b4a23f2aa0a00a06082a8648ce3d030107a144034200049dc9e3d836a834f6d14ae99dfc70ad9b65c84f351c8dbc4f9b1b61c238051fb1db23e43d4b6e17803e21ebc44fe2f66742e306daa8c4ca7d79c6dd01fc1a4e4e",
- "3077020101042086c18b56c4a2264b37c18a7937f026ab07ca6076eeea1ab90376492efb7875d9a00a06082a8648ce3d030107a144034200042f169311f2fae406de3c4a64fec94a22c35972281922a69e7657185997ae59fb3f69ac94295e58681cfbd263f8e6fbce144cc7925b71d90f57de3f3e10588321",
- "30770201010420f58221355e1b2da73d66de482ec1edcb8597f3967d00d1356f4678fea6ad67e6a00a06082a8648ce3d030107a14403420004238cc44f02fa566e249a9697a078b9d38eba06012d54a29a430843a18df7a0a4207d704a360399db95eca591f2f81b6c50390467f293a1623b4757bdb4138101",
- "30770201010420b10888a0157d524667fd575683bdcded4628a65149fde59b7340781b0cf2e36ea00a06082a8648ce3d030107a14403420004222ba11430b8719929c726aec74e8e70893e2960bc2bbee70fbaa6d88fa2a346adf0c450ea9823f0ba77d334fcd476ea036a62199338d7aa32e56c708d7a8caa",
- "30770201010420edf001bd24c92e4f65789aae228223e77df71ce9bbfd7ce4d236ea3648e1f7fea00a06082a8648ce3d030107a1440342000472693c95786ab9f4e7c923338ce98bd068e28b71f84b77e7adb378c2ce2d8f1a2e13833df1afe4569367d7a4eee3abf50124299a28045a0073ea324f5ddb45ea",
- "30770201010420e2649e591fc9072dd55573e41fc4ebfdf1db118951e4b7b2a98027ac9a4f7702a00a06082a8648ce3d030107a144034200046e34c9dea1836671f1ef259d7c3ee678c2f92d092af2518413fe9ba153a07ca8e9938784876e90cfa2989a00a83b1ac599c87a8d15be8001e46dfbfe018156a2",
- "3077020101042069cd9b710f25613794751aed951004c888d4611aefa45abc23abff218e608290a00a06082a8648ce3d030107a14403420004dcf8ff34ab841720ff8dc08b60a14f41689e65f979a1af69b5e106f4262a2cb0947c9619e980caf20b3e7c8f15e60fc31c5b611c8a58370ba8201c9b6b932bd4",
- "307702010104202898cef1944aaf90fddf433390323a02a79938568cf99f6c25bc9aa9e5cddb0aa00a06082a8648ce3d030107a1440342000491a1c20420f5005f5761419e4dcd0d9da0cf2ea4733f6d98a3d0c124f284cabdc65eafd9d2cad9b1122fca791c8b37997feed130c5725ea797cf07c61fb82734",
- "30770201010420e568bd3ffa639aa418e7d5bc9e83f3f56690ebf645015ff7f0e216d76045efd5a00a06082a8648ce3d030107a144034200042424b498297124037db950bf2a1e652ba7f977363f4f69d7308531d27bf392219d93cb78f4379b7ffb16f3e7be311e208af2409bd33000fd25a8707ac6bec76b",
- "307702010104205163d5d5eea4db97fccc692871f257842fdaca0eca967d29924242f7a2c56ad7a00a06082a8648ce3d030107a144034200044e2ca8312122039c3374db08851710d3b9a2efcbd8f5df004ec7b60a348aee32466f799b5957d39845f451071bb1f3bb99f25bf43196e7c772f7b84f39221b3a",
- "30770201010420301eb936d2737886ab2fbf670952f9ba0d324827b81801810bfd60c89e8ca862a00a06082a8648ce3d030107a14403420004455454b1f3828a2328a8925c4c98bd6e37dece276efb3299d8b7d78c9d7e6f978b14d021c07bae0c18a623fc52ab2fec1523a89b2fd0cda373e9c9442a3545f2",
- "3077020101042032c12a9bca8070c131b0a46944c17adf35eb44079f3c887fc3b93740bb9c03fca00a06082a8648ce3d030107a14403420004e61da413c4d5dbc6c004089d96a3cb55f4b20b70c544f3823a7a6322c53e134fcb8a885729ef284d68d23e0a58009d48b369f9c4f5a665a8880a48606491dd8a",
- "30770201010420aa2b40742722b81c6ffd5c47b94b8be747da259e172a82d27ebc525c8f46d17aa00a06082a8648ce3d030107a14403420004f87a863ed11592cf4f96e837038b105d155f5e09a31386ab4604234e8a975d49a9612b4597b7fb206087b70a26bce4aca31edb253530e6da83ce16beefa99f60",
- "307702010104202a70a0c827b4ce8d433e800ab0818b1401b220fadea75feff655251ee4317556a00a06082a8648ce3d030107a14403420004a5c9209fd53dc1ce2c873782ec507db5e0f9cc78292a84ecafc5bab16c2e4d786a882ad77ad999f3d6ba676ad80354ad376dabc4fa03a6c15ead3aa16f213bc5",
- "307702010104202787d04901f48c81774171ef2e2a4d440b81f7fa1f12ab93d8e79ffab3416a1ca00a06082a8648ce3d030107a14403420004010d32df4d50343609932a923f11422e3bea5fa1319fb8ce0cc800f66aa38b3f7fda1bc17c824278734baa3d9b7f52262eeacbca21304b74ba4795b5055b1e9f",
- "3077020101042032423728a897144d4fb95090ca0ac67a23eb22e2f7f925cbddaf542eeaec8faaa00a06082a8648ce3d030107a14403420004c37f9fec5b1be5b0286300ace6a5d25df8189d29604145a77b6578a4e3956ed3d9af48f8ee1e39868bba9e359e5444984f0428755e29d2012f235c9a56749148",
- "30770201010420d5bd2a3867937e0b903d19113e859ca9f6497f4af082894a6911cef3a3a12d35a00a06082a8648ce3d030107a14403420004435b2e891c46023f422119f18a04c75b9322ea4aaddd10a0568438310896388bf7037e98bd5979a6f0839acb07dead1f2f973640dcc11dcee1de8a07c0b3dd80",
- "30770201010420590edcf1f2b6ee6c1b836ace33b934597883a00ce84fe812a4b3e22432846972a00a06082a8648ce3d030107a14403420004183d7cad633cb0f4ab774f4dc19b9db87e7ef97b0f4d43ac395d2409dabbe5339dbad661c7c2fd05606e2edb08f8ace660f73bf5232011262d563603f61d2353",
- "30770201010420a0ea4e16cf8c7c641d70aea82192fb9303aab6e7b5cd72586ba287d50f4612d6a00a06082a8648ce3d030107a1440342000482a72d31e71f0aea778cb42b324abf853cb4e4e8d4b2ae0e5130480073e911f183134c047a7e1cd41a845a38057ea51a1527923518cbf47c3e195a9f44e1d242",
- "307702010104209e04b00c8d0f96ddb2fbb48cfc199905bfbfcc894acb77b56bf16a945a7c7d08a00a06082a8648ce3d030107a1440342000405efd203dcddfb66d514be0de2b35050b83e3738096cd35398165bfdbe34d34c0d96a4e6df503903c75c2c06b66b02b15cd7bf74c147d7a9f0a5e53b83c5762d",
- "30770201010420aa69f1cc2cb3482a12af4b1614d6dde01216f1cad1c9f03c681daa8648b75b37a00a06082a8648ce3d030107a1440342000474ffec1297420d0cf730b42942058699d803ab618e1e40ccf9cc17f71f62b3123d863fbf8fae37b6c958892af6151159f74e2a568917bfc2f4e00c55c32b52e7",
- "3077020101042090a04300e8d6ed9f44422a2cf93817604bf1f6233c4333ba0db20ab726852fa4a00a06082a8648ce3d030107a144034200049e6f2001baf2b6fb25e3273907ed7320f494de6b5882c4c4b9bcee7ddc60274e064cc68c64325c001f07a505722062d1ca9774a2cc1e0cd28fe5f807865bfcc1",
- "3077020101042088945c19c6ce3e63f8d8a421616391d83bec79a0c590f1607b247ffa0c677dd3a00a06082a8648ce3d030107a1440342000492d17d410f9eabf7ae4509a92494e9fe94a72947f24e60c5bb6e12b2cde3c1bfe5305a0d759138069d44268f174136971ecb752df602c282e48d40f43a8734e3",
- "3077020101042079d14eacdc4f21dc5284bd8487dcb2c22e9e53e71909474f922bf695f49cf23ea00a06082a8648ce3d030107a1440342000428039292c5bcf3593639bf5835ec9411ffd3ac236c0186697623930b5ca63f32ff41df5217e7def770d9a0de87f61526497bd9aaa95d924e0a17d85958e7c095",
- "30770201010420a6ac867ff8d00aaad23198415868a64e59217b4d22474752a146fcb52204dfa5a00a06082a8648ce3d030107a14403420004a5f37a779265c55cd4f5a7f3bffc4679395898046eb9d67d8670be39001de5a7bc010b0d218561626272989c5952e8e0d95d2590f78eec44dc62a46184956301",
- "30770201010420df446014577f6081113cd7d33c6ba91b9ac3d083e76f8873358f83129e2d0111a00a06082a8648ce3d030107a14403420004da0c932759f50ad705507f876138c2c6e012764abc8764a6dd609e6ad06099952b120be71690bc091591f1aa8d7d6e9365deddbc958bc87ff150358ad33f7537",
- "30770201010420b3351033eaaee3a9ea27cd7dc54aa2c8d787b14b7d428165f1a04a59c6d5b0f2a00a06082a8648ce3d030107a14403420004da3984fb8152403a9fb9068b16f9afb5c900f24230e205567b4405ee3cad2db3ff46968489d494b38d0c85fcc4aecccb61fc00dca54c8fd99ee5bf5e2616f1b7",
- "30770201010420deedbcef7f6821f6aab2b15ce198f5eb2064f6eb461a6b7776b4da35c81b1506a00a06082a8648ce3d030107a1440342000405422b86ce66b18e68f0fb14f28e4ed9b1f7ee84f57957f4e4b4c6b0c392e6357e4698fb707f590be1b915622ec8da476071a56919211f6e5e888284d4e33f06",
- "3077020101042078c3db0d3b1114cb99f1d0bea0d3aec9067b26964e2b85fe9df4789b24cb3da5a00a06082a8648ce3d030107a144034200046874e52d7d58b6697b407b0c0eea3cfeb528e34fca1589c5031e11aae1ad1f9280e7a4c37ddf28479cd07b4246ce9398e0e24f99946f87e08532fa26b8fb8016",
- "30770201010420f0ba42553b146cf088d3a5a3645782fe675d23561897ced7f1270a8d05cfdaaaa00a06082a8648ce3d030107a14403420004c250e12f3aa1fb6261c57cdb091cd90d82917e103711425888477b9da4359d2803aaf0015638294c7c0baa4ec77ba8fceff5ee7f15ea087a4174f58d518006dd",
- "307702010104207f2c0fc4b0e418b2d4c72a63fdc27f158f6ad44c26d161f489714525b6a13db1a00a06082a8648ce3d030107a144034200041d83885672021e783d8bd995d187f407bbda2c6bed5e8fabc7c6c5cb304a85eaffa12dad7ba874ac45f4258fffe07534843ff7fe76075470f2c77104d781688f",
- "30770201010420d3de828ac9742704d4e6981ce1fc8c473e508eda3a121cda420dacbdf39d48e9a00a06082a8648ce3d030107a14403420004c78abfc4a5c0eb3ee0c9817d1790b7ca9fd528d0bc727f9daf63f4212097538b6888b9de2ae4dff29895500be456fe0ccbee340aecb546d1558b08c3718aaa4a",
- "30770201010420d9c4e477b56f2ff0b211acd82b450336276534b350747315152a4923e6e65294a00a06082a8648ce3d030107a14403420004fbd540966b03fe2c2314f20248d345e3e9b92d6a7cfea22d1b5367f01b32d616f317e00cea1f659437b4302610abba8abb0f2bfce0a91b952e9565159c1e464e",
- "30770201010420fb84f4a426fa12920c2cf7c2d821280530c0fa93960ded8c20120511dc1d5069a00a06082a8648ce3d030107a14403420004c0177f13c6e00bb9029df089006a332192bdf12a782c60a8d00d110c53db67c344584f22677695a7f1629db1600b0559ced49ac931b08cc6a58e5ea436bde2f8",
- "30770201010420653ce060214028f7aa584910f0925d702bde18d52d8e530f07dd5004076eb614a00a06082a8648ce3d030107a1440342000433668d0c9085feae4b285fe260a316e24f24c0bb8e442583e23284bf5a962cd0357cd63ac4d1cdda58afb201bceee911ebe7cf134652dc4390f4e328f6cb5d65",
- "307702010104206123b7d5b8c53b2a2a95dd2e42fe550617b7520fe9bd94a99045addb828ad847a00a06082a8648ce3d030107a1440342000487c10fdeaabf8072dcea0dc5b18be4d72f2b8298bc891ea0a11d202438b7598ac588f16a9cd697f8220434d4e15ff4c82daaae63955525633335843069434aea",
- "3077020101042000b793c9b8553ee7bec21cd966f5aaff59a07d1fa3fa86e0164bcd2f7f4dd586a00a06082a8648ce3d030107a1440342000419d4179dbeae7fa87e356f0406c327239d34e540cd7db5174a81bd6197738bc72e46fe4bd1512dc4b35950b2c1e78e6f8f54980193be78d45e4d97a837455777",
- "307702010104200fb1a771004f6be6300eccd603b9c9e269fbdd69e5eb183d7acad51b0b205b88a00a06082a8648ce3d030107a14403420004d3b7fa62bacff49714ef28a955cdc30f4aef323293ac3aebab824892dfa3306f2ec319f5bca1771b956b4a9b1c2f565dc08b29c07ec84623932a5d6fb59be6c7",
- "30770201010420fe6907b91407619fdc95153cd59df061e88095678801008d3901f29c7c434243a00a06082a8648ce3d030107a14403420004796fcea7889128f8060b04e9000381fd3d80fe68f000063b182fe9d8984e740c387c4ed4c6729e8c715c576fe355a9b7dda6890c55b15ae6013fd51e8858b2f2",
- "30770201010420111eaff6db3b279d014b45b3da091909f054f37c350c237fe9d51b4342811299a00a06082a8648ce3d030107a144034200047d51f9178725c4134579ac6d0cb84745e0d2068ccf72d30c02dd431547f868d1cb93b5774c7e1eb9582e2151521ff16cdf80b3ba4646d64f7982066f9eb679f0",
- "30770201010420631d01e6aaa68e6c36e3425b984df02bc5b54e81951479f7cea8fd1b804bab57a00a06082a8648ce3d030107a14403420004fa1b1ed9ff904f1f050577e05b5175e897d462598fdd323c8ef25f6072dfa43034baa0119e64092fb44f7a04d59d16ba8645f52cfb7775a6536c00f7fc2ee2f1",
- "307702010104201ec553d14d45acdf147dba5fcbc3a42a1f763411d5c206d03600ed810b0cf106a00a06082a8648ce3d030107a14403420004e9a309a24d1061204087de10e5bc64b6d45369399a5a402d630ca2d04b34ae9d27d491e5fadd5d082e14454e6b2a572a24904ba2a8dc7430b20d361134188589",
- "307702010104206d31e401bb20968106a058f8df70cd5fb8e9aaca0b01a176649712aa594ff600a00a06082a8648ce3d030107a144034200048555a2f9e7256c57b406c729d2d8da12c009f219e81cecb522cb3c494dcc1c76ac6d2f641dafe816065482fb88916e1a719672c82406556e16c32cf90752a92f",
- "307702010104208ada3d6ea6000cecbfcc3eafc5d1b0674fabece2b4ed8e9192200021b8861da0a00a06082a8648ce3d030107a14403420004a99e7ed75a2e28e30d8bad1a779f2a48bded02db32b22715c804d8eeadfbf453d063f099874cb170a10d613f6b6b3be0dbdb44c79fc34f81f68aeff570193e78",
- "30770201010420d066dfb8f6ba957e19656d5b2362df0fb27075836ec7141ce344f76aa364c3cea00a06082a8648ce3d030107a14403420004597fd2183c21f6d04fa686e813cf7f838594e2e9c95b86ce34b8871674d78cc685b0918fd623e3019d8c7b67104395b1f94fc3338d0772e306572236bab59c39",
- "307702010104202c291b04d43060f4c2fd896b7a9b6b4f847fb590f6774b78a0dff2513b32f55ca00a06082a8648ce3d030107a14403420004e80bd7e6445ee6947616e235f59bbecbaa0a49737be3b969363ee8d3cfccbbc42a0a1282de0f27c135c34afad7e5c563c674e3d18f8abcad4a73c8c79dad3efa",
- "3077020101042029af306b5c8e677768355076ba86113411023024189e687d8b9c4dee12f156fda00a06082a8648ce3d030107a144034200049d7d21e6e1e586b5868853a3751618de597241215fb2328331d2f273299a11295fe6ccd5d990bf33cf0cdcda9944bf34094d5ffa4e5512ee4a55c9f5a8c25294",
- "3077020101042022e65c9fc484173b9c931261d54d2cf34b70deccb19ce0a84ce3b08bc2e0648ba00a06082a8648ce3d030107a14403420004ea9ee4ab7475ebaff6ea2a290fc77aafa4b893447d1a033f40400b4d62ee923a31d06fe5f28dbc2ebec467ebd2e002a9ea72057f0b0c60fe564584a6539376ad",
- "307702010104205000583dc21cb6fd26df1c7d6e4efb9b47ceff73c0d94ed453bae0c13a9e5795a00a06082a8648ce3d030107a144034200045a6a5b5886b01f54dfa0788f15d3542aec160843a57e723008d1b984dd572ecb8935662daaba53d756d45442efbae067f52b0b151899a645afb663205babddd3",
- "30770201010420997431e73eae00f476bb1a221b4cc9dfd18d787be207b7069141627f61ba752da00a06082a8648ce3d030107a144034200047c89dc8c46a27e20c37b0ecf1150e8b92c2dd4dc534a25545f87a5f0c44fdbf4dee2af5bcdc4012f0acee168aeb55bb4d24738fac105fc056928ff5870491047",
- "307702010104207dc10db95a597a80e916d7f8e4e419b609d767538fe9732bcc5f9d783c605a2ba00a06082a8648ce3d030107a144034200042e2ae4fae087a11fcdf9565670164c229337ed87b5056687c6bceeb84108db9a88b9e5d96a0cf121255ceefce0bb5239608768bb841e6687dbd9626222eb5187",
- "307702010104209056e22b347f5f1839f1a53f1250d098616ff04db0b49b1fddb18b987930cec7a00a06082a8648ce3d030107a1440342000427cc4c7fb5d7ac047161aee78e812ad264ba25dd878684637308674ea693817b20a5e3672de6a92dfbf82f641268052fa742e6f35ff91c617334f09f89bd1218",
- "30770201010420554ea6cfeb2cc4f1e29c08e65317d72731ee03940af9ff6a141b761d5d054db6a00a06082a8648ce3d030107a14403420004a6121746c0553ede0944da8a7f304831fcefb51b40acf78016d41cc45cc5f7e9a1b22bbea028daab5cb4c39cadf84da442749cbfc04536d6f85c3254ec7a0805",
- "30770201010420f53ff1c7db3c4e7c734bf7396a1a5364ac2dfe4b794b118aada6bab72cde8969a00a06082a8648ce3d030107a1440342000414b11ec158e3f9d558bd1da1ed0e38c92b1ad55834f3ce08e456747279dd9ed1143cff4f5e8d70189f4b114e3cd609105d6eb8f431f392487e4c9e16a152dba1",
- "30770201010420b3f394090547f5dcb2e77cef65e03a3b7d1c953cd0e069553da2795ab0adc950a00a06082a8648ce3d030107a14403420004a1a9dbe5d6dfa2dfb039aebabe96b12faf97c994e1430323d074ecbd90ef075e0fe9dc7d5eef2483d485ffb0b4a01b01e131754fb38059a1365d342d5175397a",
- "30770201010420bf13c42fa84c409161f9d73ce20fd85b20c5381914aa2a2375452b34cd352022a00a06082a8648ce3d030107a14403420004e0134214a5349a235cee406ad942ca105ef871a7e4c922ef4769466d8495c78b82f6c49270c8cd913e0cf407cdab679dd9914090ea91122ca9fb654ebcfce57d",
- "30770201010420440d975b65bf585d0813137fe041461de59221856eaf255479b5e69721cfb30da00a06082a8648ce3d030107a14403420004935a9626ddb7bd6fbcd2ad9d9333851bbc64b9997cb8e43b1a17f8e9968ed6b0e5d2edf105fbabc9bd745fa2120ac527bbfefb6e8ed96844f80b8e27b6d9a549",
- "307702010104209ea2dc59260408165d6c42205aa52e275f81c39d9bf5b1b9c8187ade875e8068a00a06082a8648ce3d030107a14403420004bc570aa24df0306cb761ee9fb22e61f59ae4f11e8804491d8651084f191c800d1e6b16e4bc3693b88f9bef82849f3cd6914a15cae60322c1f4822a2bdf426782",
- "30770201010420505b596fb71a2e36c0ba07da03442a721f3f1832dcac19631d6c11b36ab81986a00a06082a8648ce3d030107a1440342000472cfb26cf07faa4e6e9d328214677b5eb51cd2e35717ac661d732115e592a07482bf966a31792cc993bdf816a732069ed423871b53fb3c7eabab2f4d3d272013",
- "3077020101042089a9d5b397c521db4bb4a5f3e8f2043e43bb5617a2070e7bfa30dd2dbf1815a1a00a06082a8648ce3d030107a1440342000468d2aeaf641b839095644cfd4b72ab97d0bf3fae1ed36e9f81d9aff333b0123f7b846f6ca61dbbd4e10988e740463addef793994a1498987883ecf237f18bc40",
- "307702010104200919a89aedb4e20cfcd2cb568c8de18b1b60b5da17aaea3be9804eb5bc3280f5a00a06082a8648ce3d030107a14403420004139812ec6bd62fd3ce71040d87cc07671948ff82300fae5f3af80dcd4e22c870c0102c4add460b2cbbeeb298f58037fc645da20aa8f5531a5ff56d3e5b2d1944",
- "30770201010420b145fc69cfabff378f390f0a99fb98ddc8ba9228cb1adf9c7099c6393a24567aa00a06082a8648ce3d030107a14403420004b660084cb05e005fb163011663fee6946f354714565069968f16e89e9a7aac45610f05502ff9d9e3cd0fdc88083bd8840a518b71135e59a0f0f235636d5eb7c4",
- "3077020101042082d39168f289e784ace49bfdd523297b524c494f83fe7d04dd2f055b48d636b9a00a06082a8648ce3d030107a14403420004ea4021da5eec4e7f333059625ecbad3969676cf625cbf0da316f55f50ccd40e6174fdb7023c07abdb3ca91203acbcb5e78e1601f1a9aa616c5019ac5b2222ff4",
- "3077020101042066a1ebc23e993674bfdc3b9721c280b7f3c1599903063ea7899b848b942a6169a00a06082a8648ce3d030107a144034200046bdb182c6c0c1f9ea898c3847bc4b46014cb8da6a02d75b7bed3c4a9a4e9c8836d4ce22fe68b68ae56a91fb435c7ea8f05bca8e8fcb1d6b77770d419f99e51da",
- "30770201010420fa2cda21b761c46fcc5b54d47b045e24affdb95425e859bb367a07950119ab6ba00a06082a8648ce3d030107a144034200044b9e4cee102ad23fea3357f8f5f95ab9d60d34086ba4b39d5f37cbc61998ac9658ec56033ad72977d41e449d449f5aac2bc653ea8038fc04a011ff02ec49e088",
- "3077020101042028acfb3c41b7be1d9d0506ac3702c363ffd767dd738dc8ab581ad7add2ec8872a00a06082a8648ce3d030107a144034200047467dedfb8c9a7d9496d4898d6ace0fba063545ab0d345d8b63b90871927ed269645a745a7335ca511d86a366f24e7832477842b4041a9ab564c5fbce49e4df8",
- "307702010104202e57b8b867bd95a8dfcdd2cb8f82ea41bff21610019afd6e2367e755dec5b944a00a06082a8648ce3d030107a144034200048f97eb2d6ee2d3da8746d8d4f84469ea765fb0d1412b167b6d8a916b5f968b4d64ede5ea6d6e08ec0de192262fcb3ebed49e9d17858261affed84827b38c6cc9",
- "3077020101042021a904281e4c31386ce34a5b52af3a068caa65819fbcf0ca76ab6041ecdaf454a00a06082a8648ce3d030107a1440342000405f9b7894a97fcddfc3285b8e974718606616fe07c70b7ab2bfb28a85fb3014c2610ab9e8e6da8ae3da032837d3a14b1e791d2633bdd8551b4817a080b9aa697",
- "3077020101042089c2c73d08bd03da4c3111aa0b78bb1edc5243d8e119513035d3741e851dec1ca00a06082a8648ce3d030107a14403420004ec9ebc34f45150334fd1d8c92274fe43c5b3b059f15cb1963f6cf7d54bc6b1b0b4ef1c5d56d2d06ab54ce2e7606e0fa5d2f188a2d593b22d9cf6a0098aa00cb6",
-}
-
-// DecodeKey creates a test private key.
-func DecodeKey(i int) *ecdsa.PrivateKey {
- if i < 0 {
- key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if err != nil {
- panic("could not generate uniq key")
- }
-
- return key
- }
-
- if current, size := i, len(Keys); current >= size {
- panic("add more test keys, used " + strconv.Itoa(current) + " from " + strconv.Itoa(size))
- }
-
- buf, err := hex.DecodeString(Keys[i])
- if err != nil {
- panic("could not hex.Decode: " + err.Error())
- }
-
- key, err := x509.ParseECPrivateKey(buf)
- if err != nil {
- panic("could x509.ParseECPrivateKey: " + err.Error())
- }
-
- return key
-}
diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go
new file mode 100644
index 0000000000..7373e538fb
--- /dev/null
+++ b/pkg/util/testing/netmap_source.go
@@ -0,0 +1,36 @@
+package testing
+
+import (
+ "context"
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+var (
+ errInvalidDiff = errors.New("invalid diff")
+ errNetmapNotFound = errors.New("netmap not found")
+)
+
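+// TestNetmapSource is an in-memory netmap source for tests: it serves
+// netmaps from the Netmaps map keyed by epoch number.
+//
+// A minimal usage sketch (hypothetical values):
+//
+//	src := &TestNetmapSource{
+//		Netmaps:      map[uint64]*netmap.NetMap{4: nm},
+//		CurrentEpoch: 5,
+//	}
+//	nm, err := src.GetNetMap(ctx, 1) // the netmap of epoch 4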
+type TestNetmapSource struct {
+ Netmaps map[uint64]*netmap.NetMap
+ CurrentEpoch uint64
+}
+
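+// GetNetMap resolves a relative diff against the current epoch and
+// delegates to GetNetMapByEpoch; a diff of CurrentEpoch or more is
+// rejected with errInvalidDiff.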
+func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+ if diff >= s.CurrentEpoch {
+ return nil, errInvalidDiff
+ }
+ return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
+}
+
+func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
+ if nm, found := s.Netmaps[epoch]; found {
+ return nm, nil
+ }
+ return nil, errNetmapNotFound
+}
+
+func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
+ return s.CurrentEpoch, nil
+}
diff --git a/pkg/util/worker_pool.go b/pkg/util/worker_pool.go
index 145fd1a5a9..97d76c4927 100644
--- a/pkg/util/worker_pool.go
+++ b/pkg/util/worker_pool.go
@@ -1,8 +1,9 @@
package util

import (
+ "sync/atomic"
+
"github.com/panjf2000/ants/v2"
- "go.uber.org/atomic"
)

// WorkerPool represents a tool to control
diff --git a/scripts/export-metrics/main.go b/scripts/export-metrics/main.go
new file mode 100644
index 0000000000..51705ee497
--- /dev/null
+++ b/scripts/export-metrics/main.go
@@ -0,0 +1,52 @@
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+
+ local_metrics "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+)
+
+var (
+ node = flag.String("node", "", "File to export storage node metrics to.")
+ ir = flag.String("ir", "", "File to export innerring node metrics to.")
+)
+
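+// main registers either storage node or inner ring metrics (the
+// constructor is called only for its registration side effect) and
+// dumps descriptions of all registered metrics to the given JSON file.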
+func main() {
+ flag.Parse()
+
+ if *node != "" && *ir != "" {
+ fmt.Println("-node and -ir flags are mutually exclusive")
+ os.Exit(1)
+ }
+
+ var filename string
+ switch {
+ case *node != "":
+ _ = local_metrics.NewNodeMetrics()
+ filename = *node
+ case *ir != "":
+ _ = local_metrics.NewInnerRingMetrics()
+ filename = *ir
+ default:
+ flag.Usage()
+ os.Exit(1)
+ }
+
+ ds := metrics.DescribeAll()
+
+ data, err := json.Marshal(ds)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Could not parse marshal: %v\n", err)
+ os.Exit(1)
+ }
+
+ if err := os.WriteFile(filename, data, 0o644); err != nil {
+ fmt.Fprintf(os.Stderr, "Could write to file: %v\n", err)
+ os.Exit(1)
+ }
+}
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
new file mode 100644
index 0000000000..39a420358e
--- /dev/null
+++ b/scripts/populate-metabase/internal/generate.go
@@ -0,0 +1,133 @@
+package internal
+
+import (
+ cryptorand "crypto/rand"
+ "crypto/sha256"
+ "fmt"
+ "math/rand"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+)
+
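+// GeneratePayloadPool returns count random payloads of size bytes each.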
+func GeneratePayloadPool(count uint, size uint) [][]byte {
+ var pool [][]byte
+ for range count {
+ payload := make([]byte, size)
+ _, _ = cryptorand.Read(payload)
+
+ pool = append(pool, payload)
+ }
+ return pool
+}
+
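+// GenerateAttributePool returns count*count attributes, one for each
+// combination of key<i> and value<j>.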
+func GenerateAttributePool(count uint) []objectSDK.Attribute {
+ var pool []objectSDK.Attribute
+ for i := range count {
+ for j := range count {
+ attr := *objectSDK.NewAttribute()
+ attr.SetKey(fmt.Sprintf("key%d", i))
+ attr.SetValue(fmt.Sprintf("value%d", j))
+ pool = append(pool, attr)
+ }
+ }
+ return pool
+}
+
+func GenerateOwnerPool(count uint) []user.ID {
+ var pool []user.ID
+ for range count {
+ pool = append(pool, usertest.ID())
+ }
+ return pool
+}
+
+type ObjectOption func(obj *objectSDK.Object)
+
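+// GenerateObject builds a minimal valid object (version, owner,
+// container and object IDs, EC header, payload checksums), then applies
+// the given options in order.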
+func GenerateObject(options ...ObjectOption) *objectSDK.Object {
+ var ver version.Version
+ ver.SetMajor(2)
+ ver.SetMinor(1)
+
+ payload := make([]byte, 0)
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(payload))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cidtest.ID())
+
+ header := objecttest.Object().GetECHeader()
+ header.SetParent(oidtest.ID())
+ obj.SetECHeader(header)
+
+ obj.SetVersion(&ver)
+ obj.SetPayload(payload)
+ obj.SetPayloadSize(uint64(len(payload)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ for _, option := range options {
+ option(obj)
+ }
+
+ return obj
+}
+
+func WithContainerID(cid cid.ID) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetContainerID(cid)
+ }
+}
+
+func WithType(typ objectSDK.Type) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetType(typ)
+ }
+}
+
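+// WithPayloadFromPool picks a single payload from the pool and computes
+// its checksums once; every object the returned option is applied to
+// shares that payload and those checksums.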
+func WithPayloadFromPool(pool [][]byte) ObjectOption {
+ payload := pool[rand.Intn(len(pool))]
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(payload))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ return func(obj *objectSDK.Object) {
+ obj.SetPayload(payload)
+ obj.SetPayloadSize(uint64(len(payload)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+ }
+}
+
+func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ var attrs []objectSDK.Attribute
+ for range count {
+ attrs = append(attrs, pool[rand.Intn(len(pool))])
+ }
+ obj.SetAttributes(attrs...)
+ }
+}
+
+func WithOwnerIDFromPool(pool []user.ID) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetOwnerID(pool[rand.Intn(len(pool))])
+ }
+}
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
new file mode 100644
index 0000000000..fafe61eaa7
--- /dev/null
+++ b/scripts/populate-metabase/internal/populate.go
@@ -0,0 +1,260 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "sync"
+
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "golang.org/x/sync/errgroup"
+)
+
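+// EpochState is a stub epoch source that pins the current epoch to 0.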
+type EpochState struct{}
+
+func (s EpochState) CurrentEpoch() uint64 {
+ return 0
+}
+
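+// PopulateWithObjects schedules count object puts on the errgroup,
+// assigning each object a random storage ID of the form
+// "<digit>/<digit>/<digit>".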
+func PopulateWithObjects(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ digits := "0123456789"
+
+ for range count {
+ obj := factory()
+ id := fmt.Appendf(nil, "%c/%c/%c",
+ digits[rand.Int()%len(digits)],
+ digits[rand.Int()%len(digits)],
+ digits[rand.Int()%len(digits)])
+
+ prm := meta.PutPrm{}
+ prm.SetObject(obj)
+ prm.SetStorageID(id)
+
+ group.Go(func() error {
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put an object: %w", err)
+ }
+ return nil
+ })
+ }
+}
+
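+// PopulateWithBigObjects schedules count big-object insertions; each
+// object is split into parts by populateWithBigObject.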
+func PopulateWithBigObjects(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ for range count {
+ group.Go(func() error {
+ if err := populateWithBigObject(ctx, db, factory); err != nil {
+ return fmt.Errorf("couldn't put a big object: %w", err)
+ }
+ return nil
+ })
+ }
+}
+
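+// populateWithBigObject pushes a 30-byte payload through a payload size
+// limiter with MaxSize 10, so the object is transformed into several
+// parts, each of which is stored in the metabase via target.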
+func populateWithBigObject(
+ ctx context.Context,
+ db *meta.DB,
+ factory func() *objectSDK.Object,
+) error {
+ t := &target{db: db}
+
+ pk, _ := keys.NewPrivateKey()
+ p := transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: &pk.PrivateKey,
+ NextTargetInit: func() transformer.ObjectWriter { return t },
+ NetworkState: EpochState{},
+ MaxSize: 10,
+ })
+
+ obj := factory()
+ payload := make([]byte, 30)
+
+ err := p.WriteHeader(ctx, obj)
+ if err != nil {
+ return err
+ }
+
+ _, err = p.Write(ctx, payload)
+ if err != nil {
+ return err
+ }
+
+ _, err = p.Close(ctx)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
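+// target stores every object produced by the transformer directly in
+// the metabase.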
+type target struct {
+ db *meta.DB
+}
+
+func (t *target) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+ prm := meta.PutPrm{}
+ prm.SetObject(obj)
+
+ _, err := t.db.Put(ctx, prm)
+ return err
+}
+
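+// PopulateGraveyard puts one tombstone and count regular objects, then
+// inhumes every stored object with the tombstone address; puts and
+// inhumes are pipelined through the addrs channel.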
+func PopulateGraveyard(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ workBufferSize int,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ ts := factory()
+ ts.SetType(objectSDK.TypeTombstone)
+
+ prm := meta.PutPrm{}
+ prm.SetObject(ts)
+
+ group.Go(func() error {
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put a tombstone object: %w", err)
+ }
+ return nil
+ })
+
+ cID, _ := ts.ContainerID()
+ oID, _ := ts.ID()
+
+ var tsAddr oid.Address
+
+ tsAddr.SetContainer(cID)
+ tsAddr.SetObject(oID)
+
+ addrs := make(chan oid.Address, workBufferSize)
+
+ go func() {
+ defer close(addrs)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(int(count))
+
+ for range count {
+ obj := factory()
+
+ prm := meta.PutPrm{}
+ prm.SetObject(obj)
+
+ group.Go(func() error {
+ defer wg.Done()
+
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put an object: %w", err)
+ }
+
+ cID, _ := obj.ContainerID()
+ oID, _ := obj.ID()
+
+ var addr oid.Address
+ addr.SetContainer(cID)
+ addr.SetObject(oID)
+
+ addrs <- addr
+ return nil
+ })
+ }
+ wg.Wait()
+ }()
+
+ go func() {
+ for addr := range addrs {
+ prm := meta.InhumePrm{}
+ prm.SetAddresses(addr)
+ prm.SetTombstoneAddress(tsAddr)
+
+ group.Go(func() error {
+ if _, err := db.Inhume(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't inhume an object: %w", err)
+ }
+ return nil
+ })
+ }
+ }()
+}
+
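+// PopulateLocked puts one lock object and count regular objects, then
+// locks every stored object with the locker; puts and locks are
+// pipelined through the ids channel.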
+func PopulateLocked(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ workBufferSize int,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ locker := factory()
+ locker.SetType(objectSDK.TypeLock)
+
+ prm := meta.PutPrm{}
+ prm.SetObject(locker)
+
+ group.Go(func() error {
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put a locker object: %w", err)
+ }
+ return nil
+ })
+
+ ids := make(chan oid.ID, workBufferSize)
+
+ go func() {
+ defer close(ids)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(int(count))
+
+		for range count {
+			obj := factory()
+
+			prm := meta.PutPrm{}
+			prm.SetObject(obj)
+
+			group.Go(func() error {
+				defer wg.Done()
+
+				if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put an object: %w", err)
+ }
+
+ id, _ := obj.ID()
+ ids <- id
+ return nil
+ })
+ }
+ wg.Wait()
+ }()
+
+ go func() {
+ for id := range ids {
+ lockerCID, _ := locker.ContainerID()
+ lockerOID, _ := locker.ID()
+
+ group.Go(func() error {
+ if err := db.Lock(ctx, lockerCID, lockerOID, []oid.ID{id}); err != nil {
+ return fmt.Errorf("couldn't lock an object: %w", err)
+ }
+ return nil
+ })
+ }
+ }()
+}
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
new file mode 100644
index 0000000000..8c4ea41ada
--- /dev/null
+++ b/scripts/populate-metabase/main.go
@@ -0,0 +1,159 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/scripts/populate-metabase/internal"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "golang.org/x/sync/errgroup"
+)
+
+var (
+ path string
+ force bool
+ jobs uint
+
+ numContainers,
+ numObjects,
+ numAttributesPerObj,
+ numOwners,
+ numPayloads,
+ numAttributes uint
+)
+
+func main() {
+ flag.StringVar(&path, "path", "", "Path to metabase")
+ flag.BoolVar(&force, "force", false, "Rewrite existing database")
+ flag.UintVar(&jobs, "j", 10000, "Number of jobs to run")
+
+ flag.UintVar(&numContainers, "containers", 0, "Number of containers to be created")
+ flag.UintVar(&numObjects, "objects", 0, "Number of objects per container")
+ flag.UintVar(&numAttributesPerObj, "attributes", 0, "Number of attributes per object")
+
+ flag.UintVar(&numOwners, "distinct-owners", 10, "Number of distinct owners to be used")
+ flag.UintVar(&numPayloads, "distinct-payloads", 10, "Number of distinct payloads to be used")
+ flag.UintVar(&numAttributes, "distinct-attributes", 10, "Number of distinct attributes to be used")
+
+ flag.Parse()
+
+ exitIf(numPayloads == 0, "must have payloads\n")
+ exitIf(numAttributes == 0, "must have attributes\n")
+ exitIf(numOwners == 0, "must have owners\n")
+ exitIf(len(path) == 0, "path to metabase not specified\n")
+ exitIf(
+ numAttributesPerObj > numAttributes,
+ "object can't have more attributes than available\n",
+ )
+
+ info, err := os.Stat(path)
+ exitIf(
+ err != nil && !errors.Is(err, os.ErrNotExist),
+ "couldn't get path info: %s\n", err,
+ )
+
+	// Path exists.
+ if err == nil {
+ exitIf(info.IsDir(), "path is a directory\n")
+ exitIf(!force, "couldn't rewrite existing file, use '-force' flag\n")
+
+ err = os.Remove(path)
+ exitIf(err != nil, "couldn't remove existing file: %s\n", err)
+ }
+
+ err = populate()
+ exitIf(err != nil, "couldn't populate the metabase: %s\n", err)
+}
+
+func getObjectFactory(opts ...internal.ObjectOption) func() *objectSDK.Object {
+ return func() *objectSDK.Object {
+ return internal.GenerateObject(opts...)
+ }
+}
+
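+// populate opens and initializes the metabase, builds the shared
+// payload, attribute, and owner pools, and fans the population jobs out
+// through an errgroup limited to -j concurrent tasks.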
+func populate() (err error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ db := meta.New([]meta.Option{
+ meta.WithPath(path),
+ meta.WithPermissions(0o600),
+ meta.WithEpochState(internal.EpochState{}),
+ }...)
+
+ if err = db.Open(ctx, mode.ReadWrite); err != nil {
+ return fmt.Errorf("couldn't open the metabase: %w", err)
+ }
+ defer func() {
+ if errOnClose := db.Close(ctx); errOnClose != nil {
+ err = errors.Join(
+ err,
+ fmt.Errorf("couldn't close the metabase: %w", db.Close(ctx)),
+ )
+ }
+ }()
+
+ if err = db.Init(ctx); err != nil {
+ return fmt.Errorf("couldn't init the metabase: %w", err)
+ }
+
+ payloads := internal.GeneratePayloadPool(numPayloads, 32)
+ attributes := internal.GenerateAttributePool(numAttributes)
+ owners := internal.GenerateOwnerPool(numOwners)
+
+ types := []objectSDK.Type{
+ objectSDK.TypeRegular,
+ objectSDK.TypeLock,
+ objectSDK.TypeTombstone,
+ }
+
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.SetLimit(int(jobs))
+
+ for range numContainers {
+ cid := cidtest.ID()
+
+ for _, typ := range types {
+ internal.PopulateWithObjects(ctx, db, eg, numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(typ),
+ internal.WithPayloadFromPool(payloads),
+ internal.WithOwnerIDFromPool(owners),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ ))
+ }
+ internal.PopulateWithBigObjects(ctx, db, eg, numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(objectSDK.TypeRegular),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ internal.WithOwnerIDFromPool(owners),
+ ))
+ internal.PopulateGraveyard(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(objectSDK.TypeRegular),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ internal.WithOwnerIDFromPool(owners),
+ ))
+ internal.PopulateLocked(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(objectSDK.TypeRegular),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ internal.WithOwnerIDFromPool(owners),
+ ))
+ }
+
+ return eg.Wait()
+}
+
+func exitIf(cond bool, format string, args ...any) {
+ if cond {
+ fmt.Fprintf(os.Stderr, format, args...)
+ os.Exit(1)
+ }
+}