Compare commits

No commits in common. "master" and "master" have entirely different histories.

52 changed files with 0 additions and 6590 deletions

@@ -1,20 +0,0 @@
FROM golang:1.22 AS builder
ARG BUILD=now
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler
ARG VERSION=dev
WORKDIR /src
COPY . /src
RUN make
# Executable image
FROM alpine AS frostfs-s3-lifecycler
RUN apk add --no-cache bash ca-certificates
WORKDIR /
COPY --from=builder /src/bin/frostfs-s3-lifecycler /bin/frostfs-s3-lifecycler
ENTRYPOINT ["/bin/frostfs-s3-lifecycler"]

@@ -1,8 +0,0 @@
FROM alpine AS frostfs-s3-lifecycler
RUN apk add --no-cache bash ca-certificates
WORKDIR /
COPY /bin/frostfs-s3-lifecycler /bin/frostfs-s3-lifecycler
ENTRYPOINT ["/bin/frostfs-s3-lifecycler"]

@@ -1,3 +0,0 @@
.git
.cache
.github

@@ -1,23 +0,0 @@
on: [pull_request]

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make

      - name: Check dirty suffix
        run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi

@@ -1,20 +0,0 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v1
        with:
          from: 3fbad97a

@@ -1,40 +0,0 @@
on: [pull_request]

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          cache: true

      - name: Install linters
        run: make lint-install

      - name: Run linters
        run: make lint

  tests:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Update Go modules
        run: make dep

      - name: Run tests
        run: make test

@@ -1,21 +0,0 @@
on: [pull_request]

jobs:
  vulncheck:
    name: Vulncheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Run govulncheck
        run: govulncheck ./...

.gitignore

@@ -1,7 +0,0 @@
.idea
.cache
bin
temp
/plugins/
/vendor/
metrics-dump.json

@@ -1,11 +0,0 @@
[general]
fail-without-commits=True
regex-style-search=True
contrib=CC1
[title-match-regex]
regex=^\[\#[0-9Xx]+\]\s
[ignore-by-title]
regex=^Release(.*)
ignore=title-match-regex

@@ -1,68 +0,0 @@
# This file contains all available configuration options
# with their default values.

# options for analysis running
run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 15m

  # include test files or not, default is true
  tests: true

# output configuration options
output:
  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
  formats:
    - format: tab

# all available settings of specific linters
linters-settings:
  exhaustive:
    # indicates that switch statements are to be considered exhaustive if a
    # 'default' case is present, even if all enum members aren't listed in the
    # switch
    default-signifies-exhaustive: true
  govet:
    # report about shadowed variables
    check-shadowing: false
  custom:
    truecloudlab-linters:
      path: bin/external_linters.so
      original-url: git.frostfs.info/TrueCloudLab/linters.git
      settings:
        noliteral:
          enable: true
          target-methods: ["Fatal"]
          disable-packages: []
          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"

linters:
  enable:
    # mandatory linters
    - govet
    - revive

    # some default golangci-lint linters
    - errcheck
    - gosimple
    - ineffassign
    - staticcheck
    - typecheck
    - unused

    # extra linters
    - exhaustive
    - godot
    - gofmt
    - whitespace
    - goimports
    - truecloudlab-linters
  disable-all: true
  fast: false

issues:
  include:
    - EXC0002 # should have a comment
    - EXC0003 # test/Test ... consider calling this
    - EXC0004 # govet
    - EXC0005 # C-style breaks
@@ -1,52 +0,0 @@
ci:
  autofix_prs: false

repos:
  - repo: https://github.com/jorisroovers/gitlint
    rev: v0.19.1
    hooks:
      - id: gitlint
        stages: [commit-msg]
      - id: gitlint-ci

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-merge-conflict
      - id: check-json
      - id: check-xml
      - id: check-yaml
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
      - id: end-of-file-fixer
        exclude: ".key$"

  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.2
    hooks:
      - id: shellcheck

  - repo: local
    hooks:
      - id: make-lint-install
        name: install linters
        entry: make lint-install
        language: system
        pass_filenames: false
      - id: make-lint
        name: run linters
        entry: make lint
        language: system
        pass_filenames: false
      - id: go-unit-tests
        name: go unit tests
        entry: make test
        pass_filenames: false
        types: [go]
        language: system

@@ -1,29 +0,0 @@
# Changelog

This document outlines major changes between releases.

## [Unreleased]

## [0.1.2] - 2024-09-25

### Added
- More debug logs (#20)

## [0.1.1] - 2024-09-17

### Added
- Allow to configure several sources (#16)

### Fixed
- Don't create redundant delete markers (#17)
- Don't use btoken for separate lifecycle container (#19)

## [0.1.0] - 2024-07-26

### Added
- Basic lifecycle logic (#1, #2, #3, #4)

[0.1.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/compare/27189a38bb...v0.1.0
[0.1.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/compare/v0.1.0...v0.1.1
[0.1.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/compare/v0.1.1...v0.1.2
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/compare/v0.1.2...master

@@ -1,156 +0,0 @@
# Contribution guide

First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:

- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/issues) and
  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/pulls) for existing
  discussions.
- Open an issue first, to discuss a new feature or enhancement.
- Write tests and make sure the test suite passes locally and on CI.
- Open a pull request and reference the relevant issue(s).
- Make sure your commits are logically separated and have good comments
  explaining the details of your change.
- After receiving feedback, amend your commits or add new ones as
  appropriate.
- **Have fun!**

## Development Workflow

Start by forking the `frostfs-s3-lifecycler` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in detail:

### Set up your git repository

Fork the [FrostFS S3 Lifecycler
upstream](https://git.frostfs.info/repo/fork/15) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).

```sh
$ git clone https://git.frostfs.info/<username>/frostfs-s3-lifecycler.git
```

### Set up git remote as ``upstream``

```sh
$ cd frostfs-s3-lifecycler
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler.git
$ git fetch upstream
$ git merge upstream/master
...
```

### Create your feature branch

Before making code changes, make sure you create a separate branch for these
changes. You may find it convenient to name the branch in the
`<type>/<Issue>-<changes_topic>` format.

```
$ git checkout -b feature/123-something_awesome
```

### Test your changes

After your code changes, make sure

- To add test cases for the new code.
- To run `make lint`
- To squash your commits into a single commit or a series of logically separated
  commits with `git rebase -i`. It's okay to force update your pull request.
- To run `make test` and `make all` successfully.

### Commit changes

After verification, commit your changes. There is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:

```
[#Issue] <component> Summary

Description

<Macros>

<Sign-Off>
```

```
$ git commit -ams '[#123] Add some feature'
```

### Push to the branch

Push your locally committed changes to the remote origin (your fork):

```
$ git push origin feature/123-something_awesome
```

### Create a Pull Request

Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.

## DCO Sign off

All authors to the project retain copyright to their work. However, to ensure
that they are only submitting work that they have rights to, we require
everyone to acknowledge this by signing their work.

Any copyright notices in this repository should specify the authors as "the
contributors".

To sign your work, just add a line like this at the end of your commit message:

```
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
```

This can be easily done with the `--signoff` option to `git commit`.

By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

@@ -1,11 +0,0 @@
# Credits

In alphabetical order:

- Denis Kirillov (@dkirillov)

# Contributors

In chronological order:

- Denis Kirillov (@dkirillov)

Makefile

@@ -1,166 +0,0 @@
#!/usr/bin/make -f

REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.21
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
BINDIR = bin

METRICS_DUMP_OUT ?= ./metrics-dump.json

CMDS = $(notdir $(basename $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))

# Variables for docker
REPO_BASENAME = $(shell basename `go list -m`)
HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache

# Make all binaries
.PHONY: all
all: $(BINS)

.PHONY: $(BINS)
$(BINS): $(BINDIR) dep
	@echo "⇒ Build $@"
	CGO_ENABLED=0 \
	go build -v -trimpath \
	-ldflags "-X main.Version=$(VERSION)" \
	-o bin/frostfs-s3-lifecycler ./cmd/$(notdir $@)

.PHONY: $(BINDIR)
$(BINDIR):
	@echo "⇒ Ensure dir: $@"
	@mkdir -p $@

# Pull go dependencies
.PHONY: dep
dep:
	@printf "⇒ Download requirements: "
	@CGO_ENABLED=0 \
	go mod download && echo OK
	@printf "⇒ Tidy requirements: "
	@CGO_ENABLED=0 \
	go mod tidy -v && echo OK

.PHONY: image
image:
	@echo "⇒ Build FrostFS S3 Lifecycler docker image "
	@docker build \
		--build-arg REPO=$(REPO) \
		--build-arg VERSION=$(VERSION) \
		--rm \
		-f .docker/Dockerfile \
		-t $(HUB_IMAGE):$(HUB_TAG) .

.PHONY: image-push
image-push:
	@echo "⇒ Publish image"
	@docker push $(HUB_IMAGE):$(HUB_TAG)

.PHONY: dirty-image
dirty-image:
	@echo "⇒ Build FrostFS S3 Lifecycler dirty docker image "
	@docker build \
		--build-arg REPO=$(REPO) \
		--build-arg VERSION=$(VERSION) \
		--rm \
		-f .docker/Dockerfile.dirty \
		-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .

.PHONY: docker/
docker/%:
	$(if $(filter $*,all $(BINS)), \
		@echo "=> Running 'make $*' in clean Docker environment" && \
		docker run --rm -t \
		-v `pwd`:/src \
		-w /src \
		-u `stat -c "%u:%g" .` \
		--env HOME=/src \
		golang:$(GO_VERSION) make $*, \
		@echo "supported docker targets: all $(BINS) lint")

# Run tests
.PHONY: test
test:
	@go test ./... -cover

# Run tests with race detection and produce coverage output
.PHONY: cover
cover:
	@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
	@go tool cover -html=coverage.txt -o coverage.html

# Run all code formatters
.PHONY: fmts
fmts: fmt imports

# Reformat code
.PHONY: fmt
fmt:
	@echo "⇒ Processing gofmt check"
	@GO111MODULE=on gofmt -s -w ./

# Reformat imports
.PHONY: imports
imports:
	@echo "⇒ Processing goimports check"
	@GO111MODULE=on goimports -w ./

# Install linters
.PHONY: lint-install
lint-install:
	@if [ ! -d "$(LINT_DIR)" ]; then \
		mkdir -p $(TMP_DIR); \
		rm -rf $(TMP_DIR)/linters; \
		git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters; \
		make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR); \
		rm -rf $(TMP_DIR)/linters; \
		rmdir $(TMP_DIR) 2>/dev/null || true; \
		CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION); \
	fi

# Run linters
.PHONY: lint
lint: lint-install
	$(LINT_DIR)/golangci-lint --timeout=5m run

# Run linters in Docker
.PHONY: docker/lint
docker/lint:
	docker run --rm -it \
		-v `pwd`:/src \
		-u `stat -c "%u:%g" .` \
		--env HOME=/src \
		golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'

# Activate pre-commit hooks
.PHONY: pre-commit
pre-commit:
	pre-commit install -t pre-commit -t commit-msg

# Deactivate pre-commit hooks
.PHONY: unpre-commit
unpre-commit:
	pre-commit uninstall -t pre-commit -t commit-msg

.PHONY: clean
clean:
	@rm -rf $(DIRS)

# Show current version
.PHONY: version
version:
	@echo $(VERSION)

# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
.PHONY: dump-metrics
dump-metrics:
	@go test ./internal/metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))

include help.mk

README.md

@@ -1,133 +1 @@
# FrostFS S3 Lifecycler

The purpose of this service is to provide lifecycle management of objects
(https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html).
This service works with objects and lifecycle configurations uploaded to FrostFS, mainly by using
[s3-gw](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw).

## Build and run

To build the service, use the following command:

```shell
$ make
```

To run the service, use the following command:

```shell
$ ./bin/frostfs-s3-lifecycler --config config.yaml
```

A minimal config example to run with [dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env) can be:

```yaml
wallet:
  path: ./frostfs-dev-env/wallets/wallet.json
  address: NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM
  passphrase: ""

morph:
  rpc_endpoint:
    - address: ws://morph-chain.frostfs.devenv:30333/ws

credential:
  use: wallets
  source:
    wallets:
      - path: ./frostfs-dev-env/wallets/wallet.json
        address: NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM
        passphrase: ""

frostfs:
  peers:
    0:
      priority: 1
      weight: 1
      address: s01.frostfs.devenv:8080
```

A more detailed description of each parameter can be found [here](docs/configuration.md).

## Description

Once running, the service listens for new epoch notifications from the FrostFS network.
On each epoch, the service lists all users from the `frostfsid`
[contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/src/branch/master/frostfsid) and checks each
user's bucket lifecycle configuration to determine whether objects or multipart uploads should be deleted according to this
configuration.

> **NOTE**
>
> Lifecycler handles only expiration actions (transition isn't supported).

If an object should be deleted or a multipart upload aborted, the lifecycler service performs the delete action. It must have
the user's credentials for this operation, so the service must be provided with the private key of each user. This can be done
by specifying wallets in the config:

```yaml
credential:
  use: wallets
  source:
    wallets:
      - path: ./frostfs-dev-env/wallets/wallet.json
        address: NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM
        passphrase: ""
```

Currently, only the wallet source is supported.

### S3

To apply a lifecycle configuration to a bucket, we must invoke the
[PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
method on [s3-gw](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw). It can be done using the
[AWS CLI](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-lifecycle-configuration.html).

Lifecycle configuration example:
```json
{
  "Rules": [
    {
      "Status": "Enabled",
      "ID": "Abort 1 day old multiparts",
      "AbortIncompleteMultipartUpload": {
        "DaysAfterInitiation": 1
      }
    },
    {
      "Status": "Enabled",
      "ID": "Expire objects with prefix after specific date",
      "Expiration": {
        "Date": "2024-07-31T06:32:00Z"
      },
      "Filter": {
        "Prefix": "prefix"
      }
    },
    {
      "Status": "Enabled",
      "ID": "Expire objects by tags after specific date",
      "Expiration": {
        "Date": "2024-07-31T06:32:00Z"
      },
      "Filter": {
        "And": {
          "Prefix": "tags",
          "Tags": [{
            "Key": "tag-key",
            "Value": "tag-val"
          }]
        }
      }
    }
  ]
}
```
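
Applying the configuration above with the AWS CLI might look like the following sketch; the bucket name and the s3-gw endpoint below are placeholders for your own setup, and the JSON is assumed to be saved as `lifecycle.json`:

```shell
$ aws s3api put-bucket-lifecycle-configuration \
    --bucket my-bucket \
    --lifecycle-configuration file://lifecycle.json \
    --endpoint-url http://s3.frostfs.devenv:8080
```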
Details can be found in the AWS documentation:
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-configuration-examples.html

@@ -1 +0,0 @@
v0.1.2

@@ -1,359 +0,0 @@
package main

import (
	"context"
	"os"
	"os/signal"
	"sync"
	"syscall"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/frostfs"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/lifecycle"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/metrics"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph/contract"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/notificator"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/spf13/viper"
	"go.uber.org/zap"
)

type (
	App struct {
		log      *zap.Logger
		logLevel zap.AtomicLevel
		key      *keys.PrivateKey
		cfg      *viper.Viper
		done     chan struct{}

		appServices []*metrics.Service
		appMetrics  *metrics.AppMetrics

		notificator *notificator.Notificator

		settings *appSettings
	}

	appSettings struct {
		mu          sync.RWMutex
		serviceKeys []*keys.PublicKey
	}
)

const (
	HealthStatusUndefined    int32 = 0
	HealthStatusStarting     int32 = 1
	HealthStatusReady        int32 = 2
	HealthStatusShuttingDown int32 = 3
)

func newApp(ctx context.Context, cfg *viper.Viper) *App {
	appMetrics := metrics.NewAppMetrics()
	log := pickLogger(cfg, appMetrics)

	a := &App{
		log:        log.logger,
		logLevel:   log.lvl,
		cfg:        cfg,
		done:       make(chan struct{}),
		appMetrics: appMetrics,
		settings:   newAppSettings(cfg, log),
	}

	a.appMetrics.SetHealth(HealthStatusStarting)
	a.init(ctx)

	return a
}

func (a *App) init(ctx context.Context) {
	var err error

	a.key, err = fetchKey(a.cfg)
	if err != nil {
		a.log.Fatal(logs.FailedToLoadPrivateKey, zap.Error(err))
	}

	endpoints := fetchMorphEndpoints(a.cfg, a.log)
	reconnectInterval := fetchMorphReconnectClientsInterval(a.cfg)

	clientCfg := morph.Config{
		Logger:            a.log,
		Endpoints:         endpoints,
		Key:               a.key,
		ReconnectInterval: reconnectInterval,
		DialTimeout:       fetchMorphDialTimeout(a.cfg),
	}

	cli, err := morph.New(ctx, clientCfg)
	if err != nil {
		a.log.Fatal(logs.FailedToInitMorphClient, zap.Error(err))
	}

	credSource := fetchCredentialSource(a.cfg, a.log)

	frostfsidContract, err := resolver.ResolveContractHash(cli, a.cfg.GetString(cfgMorphContractFrostfsID))
	if err != nil {
		a.log.Fatal(logs.ResolveFrostfsIDContract, zap.Error(err))
	}

	ffsidCfg := contract.FrostFSIDConfig{
		Client:       cli,
		ContractHash: frostfsidContract,
	}

	containerContract, err := resolver.ResolveContractHash(cli, a.cfg.GetString(cfgMorphContractContainer))
	if err != nil {
		a.log.Fatal(logs.ResolveContainerContract, zap.Error(err))
	}

	containerCfg := contract.ContainerConfig{
		Client:       cli,
		ContractHash: containerContract,
		Log:          a.log,
	}

	objPool, treePool := getPools(ctx, a.cfg, a.log, a.key)

	epochCh := make(chan uint64)
	go func() {
		<-a.done
		close(epochCh)
	}()

	ffs := frostfs.NewFrostFS(objPool, a.log)
	tr := tree.NewTree(frostfs.NewTreePoolWrapper(treePool), a.log)

	lifecycleCfg := lifecycle.Config{
		UserFetcher:       contract.NewFrostFSID(ffsidCfg),
		ContainerFetcher:  contract.NewContainer(containerCfg),
		FrostFSFetcher:    ffs,
		CredentialSource:  credSource,
		Settings:          a.settings,
		CurrentLifecycler: a.key,
		Logger:            a.log,
		TreeFetcher:       tr,
		BufferSize:        fetchJobFetcherBuffer(a.cfg),
		EpochChannel:      epochCh,
	}

	jobProvider := lifecycle.NewJobProvider(ctx, lifecycleCfg)

	executorCfg := lifecycle.ExecutorConfig{
		Logger:         a.log,
		Jobs:           jobProvider.Jobs(),
		WorkerPoolSize: fetchExecutorPoolSize(a.cfg),
		TreeFetcher:    tr,
		FrostFSFetcher: ffs,
	}

	executor, err := lifecycle.NewExecutor(ctx, executorCfg)
	if err != nil {
		a.log.Fatal(logs.ResolveNetmapContract, zap.Error(err))
	}
	_ = executor // todo consider run with separate method

	netmapContract, err := resolver.ResolveContractHash(cli, a.cfg.GetString(cfgMorphContractNetmap))
	if err != nil {
		a.log.Fatal(logs.ResolveNetmapContract, zap.Error(err))
	}

	notificatorCfg := notificator.Config{
		Handler: func(ctx context.Context, ee notificator.NewEpochEvent) {
			a.log.Info(logs.HandlerTriggered, zap.Uint64("epoch", ee.Epoch))
			select {
			case <-ctx.Done():
				a.log.Debug(logs.HandlerContextCanceled, zap.Error(ctx.Err()))
			case epochCh <- ee.Epoch:
			}
		},
		Logger: a.log,
		NewListenerFn: func(config notificator.ListenerConfig) (notificator.Listener, error) {
			lnCfg := notificator.ConfigListener{
				Client:            cli,
				Logger:            a.log,
				ReconnectInterval: reconnectInterval,
				Parser:            config.Parser,
				Handler:           config.Handler,
			}

			return notificator.NewListener(ctx, lnCfg)
		},
		NetmapContract: netmapContract,
	}

	if a.notificator, err = notificator.New(ctx, notificatorCfg); err != nil {
		a.log.Fatal(logs.InitNotificator, zap.Error(err))
	}
}

func newAppSettings(v *viper.Viper, log *Logger) *appSettings {
	s := &appSettings{}
	s.update(v, log.logger)

	return s
}

func (s *appSettings) update(cfg *viper.Viper, log *zap.Logger) {
	svcKeys, svcKeyErr := fetchLifecycleServices(cfg)
	if svcKeyErr != nil {
		log.Warn(logs.FailedToFetchServicesKeys, zap.Error(svcKeyErr))
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	if svcKeyErr == nil {
		s.serviceKeys = svcKeys
	}
}

func (s *appSettings) ServicesKeys() keys.PublicKeys {
	s.mu.RLock()
	defer s.mu.RUnlock()

	return s.serviceKeys
}

func (a *App) Wait() {
	a.log.Info(logs.ApplicationStarted,
		zap.String("app_name", "frostfs-s3-lifecycler"),
		zap.String("version", Version))

	a.appMetrics.SetHealth(HealthStatusReady)
	a.appMetrics.SetVersion(Version)

	<-a.done

	a.log.Info(logs.ApplicationStopped)
}

func (a *App) Serve(ctx context.Context) {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGHUP)

	a.startAppServices()

	go a.notificator.Start(ctx)

loop:
	for {
		select {
		case <-ctx.Done():
			break loop
		case <-sigs:
			a.configReload()
		}
	}

	a.log.Info(logs.StoppingApplication)
	a.appMetrics.SetHealth(HealthStatusShuttingDown)

	a.stopAppServices()

	close(a.done)
}

func (a *App) configReload() {
	a.log.Info(logs.SIGHUPConfigReloadStarted)

	if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
		a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
		return
	}

	if err := readInConfig(a.cfg); err != nil {
		a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
		return
	}

	if lvl, err := getLogLevel(a.cfg.GetString(cfgLoggerLevel)); err != nil {
		a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
	} else {
		a.logLevel.SetLevel(lvl)
	}

	a.stopAppServices()
	a.startAppServices()

	a.settings.update(a.cfg, a.log)

	a.log.Info(logs.SIGHUPConfigReloadCompleted)
}

func (a *App) startAppServices() {
	a.appServices = a.appServices[:0]

	pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
	pprofService := metrics.NewPprofService(a.log, pprofConfig)
	a.appServices = append(a.appServices, pprofService)
	go pprofService.Start()

	prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
	prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
	a.appServices = append(a.appServices, prometheusService)
	go prometheusService.Start()
}

func (a *App) stopAppServices() {
	ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout)
	defer cancel()

	for _, svc := range a.appServices {
		svc.ShutDown(ctx)
	}
}

func getPools(ctx context.Context, cfg *viper.Viper, logger *zap.Logger, key *keys.PrivateKey) (*pool.Pool, *treepool.Pool) {
	var prm pool.InitParameters
	var prmTree treepool.InitParameters

	prm.SetKey(&key.PrivateKey)
	prmTree.SetKey(key)

	for _, peer := range fetchPeers(cfg, logger) {
		prm.AddNode(peer)
		prmTree.AddNode(peer)
	}

	connTimeout := fetchConnectTimeout(cfg)
	prm.SetNodeDialTimeout(connTimeout)
	prmTree.SetNodeDialTimeout(connTimeout)

	streamTimeout := fetchStreamTimeout(cfg)
	prm.SetNodeStreamTimeout(streamTimeout)
	prmTree.SetNodeStreamTimeout(streamTimeout)

	healthCheckTimeout := fetchHealthCheckTimeout(cfg)
	prm.SetHealthcheckTimeout(healthCheckTimeout)
	prmTree.SetHealthcheckTimeout(healthCheckTimeout)

	rebalanceInterval := fetchRebalanceInterval(cfg)
	prm.SetClientRebalanceInterval(rebalanceInterval)
	prmTree.SetClientRebalanceInterval(rebalanceInterval)

	errorThreshold := fetchErrorThreshold(cfg)
	prm.SetErrorThreshold(errorThreshold)

	prm.SetLogger(logger)
	prmTree.SetLogger(logger)

	prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgFrostFSTreePoolMaxAttempts))

	p, err := pool.NewPool(prm)
	if err != nil {
		logger.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
	}

	if err = p.Dial(ctx); err != nil {
		logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
	}

	treePool, err := treepool.NewPool(prmTree)
	if err != nil {
		logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
	}

	if err = treePool.Dial(ctx); err != nil {
		logger.Fatal(logs.FailedToDialTreePool, zap.Error(err))
	}

	return p, treePool
}

@@ -1,153 +0,0 @@
package main

import (
	"fmt"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/metrics"
	"git.frostfs.info/TrueCloudLab/zapjournald"
	"github.com/nspcc-dev/neo-go/cli/options"
	"github.com/spf13/viper"
	"github.com/ssgreg/journald"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

const (
	destinationStdout   string = "stdout"
	destinationJournald string = "journald"
)

type Logger struct {
	logger *zap.Logger
	lvl    zap.AtomicLevel
}

func pickLogger(v *viper.Viper, appMetrics *metrics.AppMetrics) *Logger {
	switch dest := v.GetString(cfgLoggerDestination); dest {
	case destinationStdout:
		return newStdoutLogger(v, appMetrics)
	case destinationJournald:
		return newJournaldLogger(v, appMetrics)
	default:
		panic(fmt.Sprintf("wrong destination for logger: %s", dest))
	}
}

func newStdoutLogger(v *viper.Viper, appMetrics *metrics.AppMetrics) *Logger {
	c := newZapLogConfig(v)

	out, errSink, err := openZapSinks(c)
	if err != nil {
		panic(fmt.Sprintf("open zap sinks: %v", err.Error()))
	}

	core := zapcore.NewCore(zapcore.NewConsoleEncoder(c.EncoderConfig), out, c.Level)
	core = applyZapCoreMiddlewares(core, v, appMetrics)
	l := zap.New(core, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.ErrorOutput(errSink))

	return &Logger{logger: l, lvl: c.Level}
}

func newJournaldLogger(v *viper.Viper, appMetrics *metrics.AppMetrics) *Logger {
	c := newZapLogConfig(v)

	// We can use NewJSONEncoder instead if, say, frontend
	// would like to access journald logs and parse them easily.
	encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)

	journalCore := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
	core := journalCore.With([]zapcore.Field{
		zapjournald.SyslogFacility(zapjournald.LogDaemon),
		zapjournald.SyslogIdentifier(),
		zapjournald.SyslogPid(),
	})
	core = applyZapCoreMiddlewares(core, v, appMetrics)
	l := zap.New(core, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))

	return &Logger{logger: l, lvl: c.Level}
}

func openZapSinks(cfg zap.Config) (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
	sink, closeOut, err := zap.Open(cfg.OutputPaths...)
	if err != nil {
		return nil, nil, err
	}

	errSink, _, err := zap.Open(cfg.ErrorOutputPaths...)
	if err != nil {
		closeOut()
		return nil, nil, err
	}

	return sink, errSink, nil
}

func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, appMetrics *metrics.AppMetrics) zapcore.Core {
	core = options.NewFilteringCore(core, filteringLogOption(v))

	if v.GetBool(cfgLoggerSamplingEnabled) {
		core = zapcore.NewSamplerWithOptions(core,
			v.GetDuration(cfgLoggerSamplingInterval),
			v.GetInt(cfgLoggerSamplingInitial),
			v.GetInt(cfgLoggerSamplingThereafter),
			zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
				if dec&zapcore.LogDropped > 0 {
					appMetrics.DroppedLogsInc()
				}
			}))
	}

	return core
}

func newZapLogConfig(v *viper.Viper) zap.Config {
	lvl, err := getLogLevel(v.GetString(cfgLoggerLevel))
	if err != nil {
		panic(err)
	}

	c := zap.Config{
		Level:            zap.NewAtomicLevelAt(lvl),
		EncoderConfig:    zap.NewProductionEncoderConfig(),
		OutputPaths:      []string{"stderr"},
		ErrorOutputPaths: []string{"stderr"},
	}
	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	return c
}

func filteringLogOption(v *viper.Viper) options.FilterFunc {
	tags := v.GetStringSlice(cfgLoggerTags)

	return func(entry zapcore.Entry) bool {
		if !strings.HasPrefix(entry.Message, "tag:") {
			return true
		}

		msg := entry.Message[4:] // len("tag:")
		for _, tag := range tags {
			if strings.HasPrefix(msg, tag) {
				return true
			}
		}

		return false
	}
}

func getLogLevel(lvlStr string) (zapcore.Level, error) {
	var lvl zapcore.Level
	err := lvl.UnmarshalText([]byte(lvlStr))
	if err != nil {
		return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
			"value should be one of %v", lvlStr, err, [...]zapcore.Level{
			zapcore.DebugLevel,
			zapcore.InfoLevel,
			zapcore.WarnLevel,
			zapcore.ErrorLevel,
			zapcore.DPanicLevel,
			zapcore.PanicLevel,
			zapcore.FatalLevel,
		})
	}
	return lvl, nil
}

@@ -1,16 +0,0 @@
package main

import (
	"context"
	"os/signal"
	"syscall"
)

func main() {
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	app := newApp(ctx, settings())
	go app.Serve(ctx)
	app.Wait()
}

@@ -1,10 +0,0 @@
package main

// Prefix is a prefix used for environment variables containing auth
// configuration.
const Prefix = "S3_LIFECYCLER"

var (
	// Version is the FrostFS S3 Lifecycler service version.
	Version = "dev"
)

@@ -1,509 +0,0 @@
package main

import (
	"fmt"
	"os"
	"path"
	"runtime"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/credential/walletsource"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/lifecycle"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	neogoflags "github.com/nspcc-dev/neo-go/cli/flags"
	"github.com/nspcc-dev/neo-go/cli/input"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
	"go.uber.org/zap"
)

const (
	// Wallet.
	cfgWalletPath       = "wallet.path"
	cfgWalletAddress    = "wallet.address"
	cfgWalletPassphrase = "wallet.passphrase"

	// Metrics.
	cfgPrometheusEnabled = "prometheus.enabled"
	cfgPrometheusAddress = "prometheus.address"
	cfgPprofEnabled      = "pprof.enabled"
	cfgPprofAddress      = "pprof.address"

	// Logger.
	cfgLoggerLevel              = "logger.level"
	cfgLoggerDestination        = "logger.destination"
	cfgLoggerSamplingEnabled    = "logger.sampling.enabled"
	cfgLoggerSamplingInitial    = "logger.sampling.initial"
	cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
	cfgLoggerSamplingInterval   = "logger.sampling.interval"
	cfgLoggerTags               = "logger.tags"

	// Morph.
	cfgMorphRPCEndpointPrefixTmpl        = "morph.rpc_endpoint.%d."
	cfgMorphRPCEndpointAddressTmpl       = cfgMorphRPCEndpointPrefixTmpl + "address"
	cfgMorphRPCEndpointPriorityTmpl      = cfgMorphRPCEndpointPrefixTmpl + "priority"
	cfgMorphRPCEndpointTrustedCAListTmpl = cfgMorphRPCEndpointPrefixTmpl + "trusted_ca_list"
	cfgMorphRPCEndpointCertificateTmpl   = cfgMorphRPCEndpointPrefixTmpl + "certificate"
	cfgMorphRPCEndpointKeyTmpl           = cfgMorphRPCEndpointPrefixTmpl + "key"
	cfgMorphReconnectClientsInterval     = "morph.reconnect_clients_interval"
	cfgMorphDialTimeout                  = "morph.dial_timeout"
	cfgMorphContractNetmap               = "morph.contract.netmap"
	cfgMorphContractFrostfsID            = "morph.contract.frostfsid"
	cfgMorphContractContainer            = "morph.contract.container"

	// Credential source.
	cfgCredentialUse                         = "credential.use"
	cfgCredentialSourceWalletsPrefixTmpl     = "credential.source.wallets.%d."
	cfgCredentialSourceWalletsPathTmpl       = cfgCredentialSourceWalletsPrefixTmpl + "path"
	cfgCredentialSourceWalletsAddressTmpl    = cfgCredentialSourceWalletsPrefixTmpl + "address"
	cfgCredentialSourceWalletsPassphraseTmpl = cfgCredentialSourceWalletsPrefixTmpl + "passphrase"

	// FrostFS.
	cfgFrostFSConnectTimeout      = "frostfs.connect_timeout"
	cfgFrostFSStreamTimeout       = "frostfs.stream_timeout"
	cfgFrostFSHealthcheckTimeout  = "frostfs.healthcheck_timeout"
	cfgFrostFSRebalanceInterval   = "frostfs.rebalance_interval"
	cfgFrostFSPoolErrorThreshold  = "frostfs.pool_error_threshold"
	cfgFrostFSTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
	cfgFrostFSPeersPrefixTmpl     = "frostfs.peers.%d."
	cfgFrostFSPeersAddressTmpl    = cfgFrostFSPeersPrefixTmpl + "address"
	cfgFrostFSPeersPriorityTmpl   = cfgFrostFSPeersPrefixTmpl + "priority"
	cfgFrostFSPeersWeightTmpl     = cfgFrostFSPeersPrefixTmpl + "weight"

	// Lifecycle.
	cfgLifecycleJobFetcherBuffer = "lifecycle.job_fetcher_buffer"
	cfgLifecycleExecutorPoolSize = "lifecycle.executor_pool_size"
	cfgLifecycleServices         = "lifecycle.services"

	// Command line args.
	cmdHelp      = "help"
	cmdVersion   = "version"
	cmdConfig    = "config"
	cmdConfigDir = "config-dir"
)

const (
	defaultShutdownTimeout = 15 * time.Second

	componentName = "frostfs-s3-lifecycler"

	defaultMorphRPCEndpointPriority      = 1
	defaultMorphReconnectClientsInterval = 30 * time.Second
	defaultMorphDialTimeout              = 5 * time.Second

	defaultFrostFSRebalanceInterval  = 60 * time.Second
	defaultFrostFSHealthcheckTimeout = 15 * time.Second
	defaultFrostFSConnectTimeout     = 10 * time.Second
	defaultFrostFSStreamTimeout      = 10 * time.Second

	defaultFrostFSPoolErrorThreshold uint32 = 100

	defaultLifecycleJobFetcherBuffer = 1000
	defaultLifecycleExecutorPoolSize = 100
)

func settings() *viper.Viper {
	v := viper.New()
	v.AutomaticEnv()
	v.SetEnvPrefix(Prefix)
	v.AllowEmptyEnv(true)
	v.SetConfigType("yaml")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))

	// flags setup:
	flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
	flags.SetOutput(os.Stdout)
	flags.SortFlags = false

	help := flags.BoolP(cmdHelp, "h", false, "show help")
	version := flags.BoolP(cmdVersion, "v", false, "show version")
	flags.StringArrayP(cmdConfig, "c", nil, "config paths")
	flags.String(cmdConfigDir, "", "config dir path")

	// set defaults:

	// logger:
	v.SetDefault(cfgLoggerLevel, "info")
	v.SetDefault(cfgLoggerDestination, "stdout")
	v.SetDefault(cfgLoggerSamplingThereafter, 100)
	v.SetDefault(cfgLoggerSamplingInitial, 100)
	v.SetDefault(cfgLoggerSamplingInterval, time.Second)

	// services:
	v.SetDefault(cfgPrometheusEnabled, false)
	v.SetDefault(cfgPprofEnabled, false)

	// morph:
	v.SetDefault(cfgMorphReconnectClientsInterval, defaultMorphReconnectClientsInterval)
	v.SetDefault(cfgMorphDialTimeout, defaultMorphDialTimeout)
	v.SetDefault(cfgMorphContractNetmap, "netmap.frostfs")
	v.SetDefault(cfgMorphContractFrostfsID, "frostfsid.frostfs")
	v.SetDefault(cfgMorphContractContainer, "container.frostfs")

	// frostfs:
	v.SetDefault(cfgFrostFSConnectTimeout, defaultFrostFSConnectTimeout)
	v.SetDefault(cfgFrostFSRebalanceInterval, defaultFrostFSRebalanceInterval)
	v.SetDefault(cfgFrostFSHealthcheckTimeout, defaultFrostFSHealthcheckTimeout)
	v.SetDefault(cfgFrostFSStreamTimeout, defaultFrostFSStreamTimeout)

	// lifecycle:
	v.SetDefault(cfgLifecycleJobFetcherBuffer, defaultLifecycleJobFetcherBuffer)
	v.SetDefault(cfgLifecycleExecutorPoolSize, defaultLifecycleExecutorPoolSize)

	// Bind flags with configuration values.
	if err := v.BindPFlags(flags); err != nil {
		panic(err)
	}

	if err := flags.Parse(os.Args); err != nil {
		panic(err)
	}

	switch {
	case help != nil && *help:
		printVersion()
		flags.PrintDefaults()
		os.Exit(0)
	case version != nil && *version:
		printVersion()
		os.Exit(0)
	}

	if err := readInConfig(v); err != nil {
		panic(err)
	}

	return v
}

func readInConfig(v *viper.Viper) error {
	if v.IsSet(cmdConfig) {
		if err := readConfig(v); err != nil {
			return err
		}
	}

	if v.IsSet(cmdConfigDir) {
		if err := readConfigDir(v); err != nil {
			return err
		}
	}

	return nil
}

func readConfigDir(v *viper.Viper) error {
	cfgSubConfigDir := v.GetString(cmdConfigDir)
	entries, err := os.ReadDir(cfgSubConfigDir)
	if err != nil {
		return err
	}

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		ext := path.Ext(entry.Name())
		if ext != ".yaml" && ext != ".yml" {
			continue
		}

		if err = mergeConfig(v, path.Join(cfgSubConfigDir, entry.Name())); err != nil {
			return err
		}
	}

	return nil
}

func readConfig(v *viper.Viper) error {
	for _, fileName := range v.GetStringSlice(cmdConfig) {
		if err := mergeConfig(v, fileName); err != nil {
			return err
		}
	}
	return nil
}

func mergeConfig(v *viper.Viper, fileName string) error {
	cfgFile, err := os.Open(fileName)
	if err != nil {
		return err
	}

	defer func() {
		if err2 := cfgFile.Close(); err2 != nil {
			panic(err2)
		}
	}()

	err = v.MergeConfig(cfgFile)
	return err
}

func printVersion() {
	fmt.Printf("%s\nVersion: %s\nGoVersion: %s\n", componentName, Version, runtime.Version())
}

func fetchKey(v *viper.Viper) (*keys.PrivateKey, error) {
	var password *string
	if v.IsSet(cfgWalletPassphrase) {
		pwd := v.GetString(cfgWalletPassphrase)
		password = &pwd
	}

	walletPath := v.GetString(cfgWalletPath)
	if len(walletPath) == 0 {
		return nil, fmt.Errorf("wallet path must not be empty")
	}
	w, err := wallet.NewWalletFromFile(walletPath)
	if err != nil {
		return nil, fmt.Errorf("parse wallet: %w", err)
	}

	walletAddress := v.GetString(cfgWalletAddress)

	var addr util.Uint160
	if len(walletAddress) == 0 {
		addr = w.GetChangeAddress()
	} else {
		addr, err = neogoflags.ParseAddress(walletAddress)
		if err != nil {
			return nil, fmt.Errorf("invalid address")
		}
	}

	acc := w.GetAccount(addr)
	if acc == nil {
		return nil, fmt.Errorf("couldn't find wallet account for %s", walletAddress)
	}

	if password == nil {
		pwd, err := input.ReadPassword(fmt.Sprintf("Enter password for %s > ", walletPath))
		if err != nil {
			return nil, fmt.Errorf("couldn't read password")
		}
		password = &pwd
	}

	if err = acc.Decrypt(*password, w.Scrypt); err != nil {
		return nil, fmt.Errorf("couldn't decrypt account: %w", err)
	}

	return acc.PrivateKey(), nil
}

func fetchMorphEndpoints(v *viper.Viper, l *zap.Logger) []client.Endpoint {
	var res []client.Endpoint

	for i := 0; ; i++ {
		addr := v.GetString(fmt.Sprintf(cfgMorphRPCEndpointAddressTmpl, i))
		if addr == "" {
			break
		}

		priority := v.GetInt(fmt.Sprintf(cfgMorphRPCEndpointPriorityTmpl, i))
		if priority <= 0 {
			priority = defaultMorphRPCEndpointPriority
		}

		var mtlsConfig *client.MTLSConfig
		rootCAs := v.GetStringSlice(fmt.Sprintf(cfgMorphRPCEndpointTrustedCAListTmpl, i))
		if len(rootCAs) != 0 {
			mtlsConfig = &client.MTLSConfig{
				TrustedCAList: rootCAs,
				KeyFile:       v.GetString(fmt.Sprintf(cfgMorphRPCEndpointKeyTmpl, i)),
				CertFile:      v.GetString(fmt.Sprintf(cfgMorphRPCEndpointCertificateTmpl, i)),
			}
		}

		res = append(res, client.Endpoint{
			Address:    addr,
			Priority:   priority,
			MTLSConfig: mtlsConfig,
		})
	}

	if len(res) == 0 {
		l.Fatal(logs.NoMorphRPCEndpoints)
	}

	return res
}

func fetchWalletsCredentials(v *viper.Viper, l *zap.Logger) []walletsource.Wallet {
	var res []walletsource.Wallet

	for i := 0; ; i++ {
		walletPath := v.GetString(fmt.Sprintf(cfgCredentialSourceWalletsPathTmpl, i))
		if walletPath == "" {
			break
		}

		res = append(res, walletsource.Wallet{
			Path:       walletPath,
			Address:    v.GetString(fmt.Sprintf(cfgCredentialSourceWalletsAddressTmpl, i)),
			Passphrase: v.GetString(fmt.Sprintf(cfgCredentialSourceWalletsPassphraseTmpl, i)),
		})
	}

	if len(res) == 0 {
		l.Fatal(logs.NoCredentialSourceWallets)
	}

	return res
}

func fetchPeers(v *viper.Viper, l *zap.Logger) []pool.NodeParam {
	var nodes []pool.NodeParam
	for i := 0; ; i++ {
		address := v.GetString(fmt.Sprintf(cfgFrostFSPeersAddressTmpl, i))
		if address == "" {
			break
		}

		priority := v.GetInt(fmt.Sprintf(cfgFrostFSPeersPriorityTmpl, i))
		if priority <= 0 { // unspecified or wrong
			priority = 1
		}

		weight := v.GetFloat64(fmt.Sprintf(cfgFrostFSPeersWeightTmpl, i))
		if weight <= 0 { // unspecified or wrong
			weight = 1
		}

		nodes = append(nodes, pool.NewNodeParam(priority, address, weight))

		l.Info(logs.AddedStoragePeer,
			zap.String("address", address),
			zap.Int("priority", priority),
			zap.Float64("weight", weight))
	}

	return nodes
}

func fetchConnectTimeout(cfg *viper.Viper) time.Duration {
	connTimeout := cfg.GetDuration(cfgFrostFSConnectTimeout)
	if connTimeout <= 0 {
		connTimeout = defaultFrostFSConnectTimeout
	}

	return connTimeout
}

func fetchStreamTimeout(cfg *viper.Viper) time.Duration {
	streamTimeout := cfg.GetDuration(cfgFrostFSStreamTimeout)
	if streamTimeout <= 0 {
		streamTimeout = defaultFrostFSStreamTimeout
	}

	return streamTimeout
}

func fetchHealthCheckTimeout(cfg *viper.Viper) time.Duration {
	healthCheckTimeout := cfg.GetDuration(cfgFrostFSHealthcheckTimeout)
	if healthCheckTimeout <= 0 {
		healthCheckTimeout = defaultFrostFSHealthcheckTimeout
	}

	return healthCheckTimeout
}

func fetchRebalanceInterval(cfg *viper.Viper) time.Duration {
	rebalanceInterval := cfg.GetDuration(cfgFrostFSRebalanceInterval)
	if rebalanceInterval <= 0 {
		rebalanceInterval = defaultFrostFSRebalanceInterval
	}

	return rebalanceInterval
}

func fetchErrorThreshold(cfg *viper.Viper) uint32 {
	errorThreshold := cfg.GetUint32(cfgFrostFSPoolErrorThreshold)
	if errorThreshold <= 0 {
		errorThreshold = defaultFrostFSPoolErrorThreshold
	}

	return errorThreshold
}

func fetchJobFetcherBuffer(cfg *viper.Viper) int {
	bufferSize := cfg.GetInt(cfgLifecycleJobFetcherBuffer)
	if bufferSize <= 0 {
		bufferSize = defaultLifecycleJobFetcherBuffer
	}

	return bufferSize
}

func fetchExecutorPoolSize(cfg *viper.Viper) int {
	val := cfg.GetInt(cfgLifecycleExecutorPoolSize)
	if val <= 0 {
		val = defaultLifecycleExecutorPoolSize
	}

	return val
}

func fetchMorphReconnectClientsInterval(cfg *viper.Viper) time.Duration {
	val := cfg.GetDuration(cfgMorphReconnectClientsInterval)
	if val <= 0 {
		val = defaultMorphReconnectClientsInterval
	}

	return val
}

func fetchMorphDialTimeout(cfg *viper.Viper) time.Duration {
	val := cfg.GetDuration(cfgMorphDialTimeout)
	if val <= 0 {
		val = defaultMorphDialTimeout
	}

	return val
}

func fetchLifecycleServices(v *viper.Viper) (keys.PublicKeys, error) {
	configKeys := v.GetStringSlice(cfgLifecycleServices)
	result := make(keys.PublicKeys, 0, len(configKeys))

	uniqKeys := make(map[string]struct{}, len(configKeys))

	for _, configKey := range configKeys {
		if _, ok := uniqKeys[configKey]; ok {
			continue
		}

		k, err := keys.NewPublicKeyFromString(configKey)
		if err != nil {
			return nil, fmt.Errorf("key '%s': %w", configKey, err)
		}

		result = append(result, k)
		uniqKeys[configKey] = struct{}{}
	}

	return result, nil
}

func fetchCredentialSource(v *viper.Viper, l *zap.Logger) lifecycle.CredentialSource {
	credUse := v.GetString(cfgCredentialUse)

	var (
		err        error
		credSource lifecycle.CredentialSource
	)

	switch credUse {
	case "wallets":
		if credSource, err = walletsource.New(fetchWalletsCredentials(v, l)); err != nil {
			l.Fatal(logs.CouldntCreateWalletSource, zap.Error(err))
		}
	default:
		l.Fatal(logs.UnknownCredentialSource, zap.String(cfgCredentialUse, credUse))
	}

	return credSource
}

@@ -1,65 +0,0 @@
# Wallet
# Path to wallet
S3_LIFECYCLER_WALLET_PATH=/path/to/wallet.json
# Account address. If omitted, the default one is used.
S3_LIFECYCLER_WALLET_ADDRESS=NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
# Passphrase to decrypt wallet.
S3_LIFECYCLER_WALLET_PASSPHRASE=pwd

# Logger
S3_LIFECYCLER_LOGGER_LEVEL=debug
S3_LIFECYCLER_LOGGER_DESTINATION=stdout
S3_LIFECYCLER_LOGGER_SAMPLING_ENABLED=false
S3_LIFECYCLER_LOGGER_SAMPLING_INITIAL=100
S3_LIFECYCLER_LOGGER_SAMPLING_THEREAFTER=100
S3_LIFECYCLER_LOGGER_SAMPLING_INTERVAL=1s

# Metrics
S3_LIFECYCLER_PPROF_ENABLED=false
S3_LIFECYCLER_PPROF_ADDRESS=localhost:8077
S3_LIFECYCLER_PROMETHEUS_ENABLED=false
S3_LIFECYCLER_PROMETHEUS_ADDRESS=localhost:8078

# Morph chain
S3_LIFECYCLER_MORPH_RPC_ENDPOINT_0_ADDRESS="wss://rpc1.morph.frostfs.info:40341/ws"
S3_LIFECYCLER_MORPH_RPC_ENDPOINT_0_PRIORITY=0
S3_LIFECYCLER_MORPH_RPC_ENDPOINT_0_TRUSTED_CA_LIST="/path/to/ca.pem"
S3_LIFECYCLER_MORPH_RPC_ENDPOINT_0_CERTIFICATE="/path/to/cert"
S3_LIFECYCLER_MORPH_RPC_ENDPOINT_0_KEY="/path/to/key"
S3_LIFECYCLER_MORPH_RPC_ENDPOINT_1_ADDRESS="wss://rpc2.morph.frostfs.info:40341/ws"
S3_LIFECYCLER_MORPH_RPC_ENDPOINT_1_PRIORITY=2
S3_LIFECYCLER_MORPH_RECONNECT_CLIENTS_INTERVAL=30s
S3_LIFECYCLER_MORPH_DIAL_TIMEOUT=5s
S3_LIFECYCLER_MORPH_CONTRACT_NETMAP=netmap.frostfs
S3_LIFECYCLER_MORPH_CONTRACT_FROSTFSID=frostfsid.frostfs
S3_LIFECYCLER_MORPH_CONTRACT_CONTAINER=container.frostfs

# Credential source
S3_LIFECYCLER_CREDENTIAL_USE=wallets
S3_LIFECYCLER_CREDENTIAL_SOURCE_WALLETS_0_PATH=/path/to/user/wallet.json
S3_LIFECYCLER_CREDENTIAL_SOURCE_WALLETS_0_ADDRESS=NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
S3_LIFECYCLER_CREDENTIAL_SOURCE_WALLETS_0_PASSPHRASE=""

# Lifecycle
S3_LIFECYCLER_LIFECYCLE_JOB_FETCHER_BUFFER=1000
S3_LIFECYCLER_LIFECYCLE_EXECUTOR_POOL_SIZE=100
S3_LIFECYCLER_LIFECYCLE_SERVICES=0313b1ac3a8076e155a7e797b24f0b650cccad5941ea59d7cfd51a024a8b2a06bf 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a

# FrostFS
S3_LIFECYCLER_FROSTFS_STREAM_TIMEOUT=10s
S3_LIFECYCLER_FROSTFS_CONNECT_TIMEOUT=10s
S3_LIFECYCLER_FROSTFS_HEALTHCHECK_TIMEOUT=15s
S3_LIFECYCLER_FROSTFS_REBALANCE_INTERVAL=60s
S3_LIFECYCLER_FROSTFS_POOL_ERROR_THRESHOLD=100
S3_LIFECYCLER_FROSTFS_TREE_POOL_MAX_ATTEMPTS=4
S3_LIFECYCLER_FROSTFS_PEERS_0_ADDRESS=node1.frostfs:8080
S3_LIFECYCLER_FROSTFS_PEERS_0_PRIORITY=1
S3_LIFECYCLER_FROSTFS_PEERS_0_WEIGHT=1
S3_LIFECYCLER_FROSTFS_PEERS_1_ADDRESS=node2.frostfs:8080
S3_LIFECYCLER_FROSTFS_PEERS_1_PRIORITY=2
S3_LIFECYCLER_FROSTFS_PEERS_1_WEIGHT=0.1
S3_LIFECYCLER_FROSTFS_PEERS_2_ADDRESS=node3.frostfs:8080
S3_LIFECYCLER_FROSTFS_PEERS_2_PRIORITY=2
S3_LIFECYCLER_FROSTFS_PEERS_2_WEIGHT=0.9

@@ -1,79 +0,0 @@
# Wallet address, path to the wallet must be set as cli parameter or environment variable
wallet:
  path: /path/to/wallet.json # Path to wallet
  address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP # Account address. If omitted default one will be used.
  passphrase: "" # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.

logger:
  level: info # Log level.
  destination: stdout # Logging destination.
  sampling:
    enabled: false
    initial: 100
    thereafter: 100
    interval: 1s

pprof:
  enabled: false
  address: localhost:8077 # Endpoint for service profiling

prometheus:
  enabled: false
  address: localhost:8078 # Endpoint for service metrics

morph:
  rpc_endpoint:
    - address: wss://rpc1.morph.frostfs.info:40341/ws
      priority: 1
      trusted_ca_list:
        - "/path/to/ca.pem"
      certificate: "/path/to/cert"
      key: "/path/to/key"
    - address: wss://rpc2.morph.frostfs.info:40341/ws
      priority: 2
  reconnect_clients_interval: 30s
  dial_timeout: 5s
  contract:
    netmap: netmap.frostfs
    frostfsid: frostfsid.frostfs
    container: container.frostfs

credential:
  use: wallets
  source:
    wallets:
      - path: /path/to/wallet.json
        address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
        passphrase: ""

lifecycle:
  job_fetcher_buffer: 1000
  executor_pool_size: 100
  services:
    - 0313b1ac3a8076e155a7e797b24f0b650cccad5941ea59d7cfd51a024a8b2a06bf

frostfs:
  stream_timeout: 10s
  connect_timeout: 10s
  healthcheck_timeout: 15s
  rebalance_interval: 60s
  pool_error_threshold: 100
  tree_pool_max_attempts: 4
  peers:
    0:
      priority: 1
      weight: 1
      address: s01.frostfs.devenv:8080
    1:
      priority: 2
      weight: 1
      address: s02.frostfs.devenv:8080
    2:
      priority: 2
      weight: 1
      address: s03.frostfs.devenv:8080
    3:
      priority: 2
      weight: 1
      address: s04.frostfs.devenv:8080

@@ -1,3 +0,0 @@
pprof:
  enabled: false
  address: localhost:8077

@@ -1,3 +0,0 @@
prometheus:
  enabled: false
  address: localhost:8078

@@ -1,254 +0,0 @@
# FrostFS S3 Lifecycler configuration

This section contains a detailed description of the FrostFS S3 Lifecycler configuration.

# Structure

| Section      | Description                                             |
|--------------|---------------------------------------------------------|
| `wallet`     | [Wallet configuration](#wallet-section)                 |
| `logger`     | [Logger configuration](#logger-section)                 |
| `pprof`      | [Pprof configuration](#pprof-section)                   |
| `prometheus` | [Prometheus configuration](#prometheus-section)         |
| `morph`      | [Morph configuration](#morph-section)                   |
| `credential` | [Credential source configuration](#credential-section)  |
| `lifecycle`  | [Lifecycle configuration](#lifecycle-section)           |
| `frostfs`    | [FrostFS configuration](#frostfs-section)               |

### Reload on SIGHUP

Some config values can be reloaded on SIGHUP signal.
Such parameters are marked in the tables below.
You can send a SIGHUP signal to the app using the following command:

```shell
$ kill -s SIGHUP <app_pid>
```

# `wallet` section

Configuration of the key for the lifecycle service.

```yaml
wallet:
  path: /path/to/wallet.json
  address: Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn
  passphrase: ""
```
| Parameter    | Type     | Default value | Description                                                               |
|--------------|----------|---------------|---------------------------------------------------------------------------|
| `path`       | `string` |               | Path to wallet.                                                           |
| `address`    | `string` |               | Account address to get from wallet. If omitted, the default one is used.  |
| `passphrase` | `string` |               | Passphrase to decrypt wallet.                                             |
# `logger` section
```yaml
logger:
  level: info
  destination: stdout
  sampling:
    enabled: false
    initial: 100
    thereafter: 100
    interval: 1s
  tags:
    - "expiration_delete_object"
    - "multipart_delete_object"
```
| Parameter             | Type       | SIGHUP reload | Default value | Description                                                                                             |
|-----------------------|------------|---------------|---------------|---------------------------------------------------------------------------------------------------------|
| `level`               | `string`   | yes           | `info`        | Logging level. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`.                   |
| `destination`         | `string`   | no            | `stdout`      | Destination for logger: `stdout` or `journald`.                                                         |
| `sampling.enabled`    | `bool`     | no            | `false`       | Enable sampling.                                                                                        |
| `sampling.initial`    | `int`      | no            | `100`         | Logs the first N of the same (level and message) log entries each interval.                            |
| `sampling.thereafter` | `int`      | no            | `100`         | Logs every Mth of the same (level and message) log entries after the first N entries in that interval. |
| `sampling.interval`   | `duration` | no            | `1s`          | Sampling interval.                                                                                      |
| `tags`                | `[]string` | no            |               | Tagged log entries that should be additionally logged (available tags are listed in the next section). |
## Tags

There are additional log entries that can hurt performance; they can be enabled with the `logger.tags`
parameter. Available tags:

* `expiration_delete_object`
* `expiration_process_version`
* `expiration_remove_version`
* `multipart_delete_object`
* `multipart_process_upload`
# `pprof` section
Contains configuration for the `pprof` profiler.
```yaml
pprof:
  enabled: false
  address: localhost:8077
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|-----------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable pprof service. |
| `address` | `string` | yes | | Address that pprof service listener binds to. |
# `prometheus` section
Contains configuration for the `prometheus` metrics service.
```yaml
prometheus:
  enabled: false
  address: localhost:8078
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|----------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable prometheus service. |
| `address` | `string` | yes | | Address that prometheus service listener binds to. |
# `morph` section
Contains configuration for the `morph` chain.
```yaml
morph:
  rpc_endpoint:
    - address: wss://rpc1.morph.frostfs.info:40341/ws
      priority: 0
      trusted_ca_list:
        - "/path/to/ca.pem"
      certificate: "/path/to/cert"
      key: "/path/to/key"
    - address: wss://rpc2.morph.frostfs.info:40341/ws
      priority: 2
  reconnect_clients_interval: 30s
  dial_timeout: 5s
  contract:
    netmap: netmap.frostfs
    frostfsid: frostfsid.frostfs
```
| Parameter                      | Type       | SIGHUP reload | Default value       | Description                                                                                                       |
|--------------------------------|------------|---------------|---------------------|--------------------------------------------------------------------------------------------------------------------|
| `rpc_endpoint.address`         | `string`   | no            |                     | The address of the RPC host to connect to.                                                                        |
| `rpc_endpoint.priority`        | `int`      | no            |                     | Priority of the RPC endpoint.                                                                                     |
| `rpc_endpoint.trusted_ca_list` | `[]string` | no            |                     | List of paths to CAs to use in mTLS configuration.                                                                |
| `rpc_endpoint.certificate`     | `string`   | no            |                     | Path to certificate to use in mTLS configuration.                                                                 |
| `rpc_endpoint.key`             | `string`   | no            |                     | Path to key to use in mTLS configuration.                                                                         |
| `reconnect_clients_interval`   | `string`   | no            | `30s`               | When all endpoints have failed, the overall connection is reinitialized. This value is the time between retries.  |
| `dial_timeout`                 | `string`   | no            | `5s`                | Dial timeout to connect to a morph endpoint.                                                                      |
| `contract.netmap`              | `string`   | no            | `netmap.frostfs`    | Netmap contract hash (LE) or name in NNS.                                                                         |
| `contract.frostfsid`           | `string`   | no            | `frostfsid.frostfs` | FrostfsID contract hash (LE) or name in NNS. This contract is used to get all users to process their containers.  |
| `contract.container`           | `string`   | no            | `container.frostfs` | Container contract hash (LE) or name in NNS.                                                                      |
# `credential` section
Contains configuration for the source of user private keys (credentials).
```yaml
credential:
  use: wallets
  source:
    wallets:
      - path: /path/to/wallet.json
        address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
        passphrase: ""
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|-------------------------------|
| `use` | `string` | no | | Name of source to use. |
| `source` | `map` | no | | Available credential sources. |
## `wallets` subsection
Source of user private keys as wallets files on filesystem.
```yaml
credential:
  source:
    wallets:
      - path: /path/to/wallet.json
        address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
        passphrase: ""
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|--------------|----------|---------------|---------------|-----------------------------------------------------------------|
| `path` | `string` | no | | Path to wallet on filesystem. |
| `address`    | `string` | no            |               | Account address in wallet. If omitted, the default one is used. |
| `passphrase` | `string` | no | | Passphrase to decrypt wallet. |
# `lifecycle` section
Configuration for main lifecycle handling procedure.
```yaml
lifecycle:
  job_fetcher_buffer: 1000
  executor_pool_size: 100
  services:
    - 0313b1ac3a8076e155a7e797b24f0b650cccad5941ea59d7cfd51a024a8b2a06bf
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------|------------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `job_fetcher_buffer` | `int`      | no            | `1000`        | Size of the buffered channel used to fetch users/containers and other data for the lifecycle procedure. This parameter helps limit the number of concurrent outgoing network requests.   |
| `executor_pool_size` | `int`      | no            | `100`         | Worker pool size for tidying containers up (according to their lifecycle configurations).                                                                                                |
| `services`           | `[]string` | yes           |               | List of lifecycle services' public keys. Used to split jobs between service instances.                                                                                                   |
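A minimal sketch of how these two parameters interact, assuming a simplified `Job` type (the real executor, shown later in this changeset, uses the same `ants` worker pool):
```go
// Sketch: a buffered job channel (job_fetcher_buffer) feeding a bounded
// worker pool (executor_pool_size).
package main

import (
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

type Job struct{ ID int }

func main() {
	jobs := make(chan Job, 1000)   // job_fetcher_buffer: producer blocks once full
	pool, err := ants.NewPool(100) // executor_pool_size: at most 100 jobs in flight
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	go func() {
		for i := 0; i < 5; i++ {
			jobs <- Job{ID: i}
		}
		close(jobs)
	}()

	var wg sync.WaitGroup
	for job := range jobs {
		job := job // capture loop variable for the closure
		wg.Add(1)
		if err := pool.Submit(func() {
			defer wg.Done()
			fmt.Println("processing job", job.ID)
		}); err != nil {
			wg.Done()
		}
	}
	wg.Wait()
}
```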
# `frostfs` section
Configuration for FrostFS storage.
```yaml
frostfs:
stream_timeout: 10s
connect_timeout: 10s
healthcheck_timeout: 5s
rebalance_interval: 1m
pool_error_threshold: 100
tree_pool_max_attempts: 4
peers:
0:
address: node1.frostfs:8080
priority: 1
weight: 1
1:
address: node2.frostfs:8080
priority: 2
weight: 0.1
2:
address: node3.frostfs:8080
priority: 2
weight: 0.9
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|--------------------------|------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------|
| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in streaming RPC. |
| `connect_timeout` | `duration` | no | `10s` | Timeout to connect to a storage node. |
| `healthcheck_timeout` | `duration` | no | `15s` | Timeout to check storage node health during rebalance. |
| `rebalance_interval` | `duration` | no | `60s` | Interval to check storage node health. |
| `pool_error_threshold`   | `uint32`   | no            | `100`         | The number of connection errors after which a storage node is considered unhealthy.                                        |
| `tree_pool_max_attempts` | `uint32`   | no            | `0`           | Maximum number of attempts to make a successful tree request. The value `0` means the number of attempts equals the number of nodes in the pool. |
## `peers` subsection
This configuration makes the service use the first node (node1.frostfs:8080)
while it is healthy. Otherwise, it sends 10% of requests to the second node
(node2.frostfs:8080) and 90% to the third node (node3.frostfs:8080).
As long as any node of a given priority level is healthy,
nodes of other priority levels are not used.
The lower the value, the higher the priority.
| Parameter | Type | Default value | Description |
|------------------|----------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
| `peers.address`  | `string` |               | Address of a storage node.                                                                                                                               |
| `peers.priority` | `int`    | `1`           | Groups nodes; the group is not switched until all nodes with the same priority become unhealthy. The lower the value, the higher the priority.           |
| `peers.weight`   | `float`  | `1`           | Weight of a node within the group of the same priority. Requests are distributed across nodes proportionally to these values.                            |
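For illustration, the example above plausibly maps onto a `frostfs-sdk-go` pool roughly as follows; the option names are assumptions based on the SDK version pinned in `go.mod`, not an excerpt from this repository:
```go
// Hedged sketch: wiring the `peers` example into an SDK pool.
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
	key, err := keys.NewPrivateKey()
	if err != nil {
		panic(err)
	}

	var prm pool.InitParameters
	prm.SetKey(&key.PrivateKey)
	// Priority/weight values from the example configuration above.
	prm.AddNode(pool.NewNodeParam(1, "node1.frostfs:8080", 1))
	prm.AddNode(pool.NewNodeParam(2, "node2.frostfs:8080", 0.1))
	prm.AddNode(pool.NewNodeParam(2, "node3.frostfs:8080", 0.9))

	p, err := pool.NewPool(prm)
	if err != nil {
		panic(err)
	}
	if err = p.Dial(context.Background()); err != nil {
		panic(err)
	}
	defer p.Close()
}
```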

98
go.mod
View file

@ -1,98 +0,0 @@
module git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-node v0.42.0-rc.5
git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.30.1-0.20240802133702-0322c0c0b253
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240722061523-7e94a6adf2ba
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/nspcc-dev/neo-go v0.106.2
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.19.0
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.uber.org/zap v1.27.0
golang.org/x/text v0.16.0
)
replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164 // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/aws/aws-sdk-go v1.44.6 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-chi/chi/v5 v5.0.8 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/minio/sio v0.3.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/urfave/cli v1.22.14 // indirect
go.etcd.io/bbolt v1.3.9 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.18.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

353
go.sum
View file

@ -1,353 +0,0 @@
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164 h1:XxvwQKJT/f16qS3df5PBQPRYKkhy0/A7zH6644QpKD0=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-node v0.42.0-rc.5 h1:lVWO3JtF3R4Irb+/xT5+wY0oMOPgRTytHichxm+nIjk=
git.frostfs.info/TrueCloudLab/frostfs-node v0.42.0-rc.5/go.mod h1:IZBD+sRxSxpXXIkg0rAK5yvkGHZUaHBqmcWFu2UmbmQ=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.30.1-0.20240802133702-0322c0c0b253 h1:X51wg0Kr0yJxb6YJxHTUxqgFj6fNy6ShRcgM0FepOko=
git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.30.1-0.20240802133702-0322c0c0b253/go.mod h1:ZWARi0rzNgCD/RjHEYGg4z3pKaZap8ytU6gOJ+lCD5g=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240722061523-7e94a6adf2ba h1:OP5fCRRQ5ndRmAYyuLr7rBOMz5nKrB+o9B1XZ+mm3XY=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240722061523-7e94a6adf2ba/go.mod h1:vluJ/+yQMcq8ZIZZSA7Te+JKClr0lgtRErjICvb8wto=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18 h1:JRjwcHaQajTbSCBCK3yZnqvyHvgWBaoThDGuT4kvIIc=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a h1:Bk1fB4cQASPKgAVGCdlBOEp5ohZfDxqK6fZM8eP+Emo=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 h1:npHgfD4Tl2WJS3AJaMUi5ynGDPUBfkg3U3fCzDyXZ+4=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/aws/aws-sdk-go v1.44.6 h1:Y+uHxmZfhRTLX2X3khkdxCoTZAyGEX21aOUHe1U6geg=
github.com/aws/aws-sdk-go v1.44.6/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 h1:tYj5Ydh5D7Xg2R1tJnoG36Yta7NVB8C0vx36oPA3Bbw=
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus=
github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/nspcc-dev/dbft v0.2.0 h1:sDwsQES600OSIMncV176t2SX5OvB14lzeOAyKFOkbMI=
github.com/nspcc-dev/dbft v0.2.0/go.mod h1:oFE6paSC/yfFh9mcNU6MheMGOYXK9+sPiRk3YMoz49o=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
github.com/nspcc-dev/hrw v1.0.9 h1:17VcAuTtrstmFppBjfRiia4K2wA/ukXZhLFS8Y8rz5Y=
github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkPG06MU=
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k=
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
github.com/nspcc-dev/neofs-api-go/v2 v2.14.0 h1:jhuN8Ldqz7WApvUJRFY0bjRXE1R3iCkboMX5QVZhHVk=
github.com/nspcc-dev/neofs-api-go/v2 v2.14.0/go.mod h1:DRIr0Ic1s+6QgdqmNFNLIqMqd7lNMJfYwkczlm1hDtM=
github.com/nspcc-dev/neofs-crypto v0.4.0 h1:5LlrUAM5O0k1+sH/sktBtrgfWtq1pgpDs09fZo+KYi4=
github.com/nspcc-dev/neofs-crypto v0.4.0/go.mod h1:6XJ8kbXgOfevbI2WMruOtI+qUJXNwSGM/E9eClXxPHs=
github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.11 h1:QOc8ZRN5DXlAeRPh5QG9u8rMLgoeRNiZF5/vL7QupWg=
github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.11/go.mod h1:W+ImTNRnSNMH8w43H1knCcIqwu7dLHePXtlJNZ7EFIs=
github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
github.com/nspcc-dev/tzhash v1.7.0 h1:/+aL33NC7y5OIGnY2kYgjZt8mg7LVGFMdj/KAJLndnk=
github.com/nspcc-dev/tzhash v1.7.0/go.mod h1:Dnx9LUlOLr5paL2Rtc96x0PPs8D9eIkUtowt1n+KQus=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo=
github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 h1:zr8ymM5OWWjjiWRzwTfZ67c905+2TMHYp2lMJ52QTyM=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0/go.mod h1:sQs7FT2iLVJ+67vYngGJkPe1qr39IzaBzaj9IDNNY8k=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

22
help.mk
View file

@ -1,22 +0,0 @@
.PHONY: help
# Show this help prompt
help:
@echo ' Usage:'
@echo ''
@echo ' make <target>'
@echo ''
@echo ' Targets:'
@echo ''
@awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9.%_/-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
# Show help for docker/% IGNORE
help.docker/%:
$(eval TARGETS:=$(notdir all lint) ${BINS})
@echo ' Usage:'
@echo ''
@echo ' make docker/% -- Run `make %` in Golang container'
@echo ''
@echo ' Supported docker targets:'
@echo ''
@$(foreach bin, $(TARGETS), echo ' ' $(bin);)

View file

@ -1,77 +0,0 @@
package walletsource
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/lifecycle"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
)
type Source struct {
keys []*keys.PrivateKey
}
type Wallet struct {
Path string
Address string
Passphrase string
}
var _ lifecycle.CredentialSource = (*Source)(nil)
func New(wallets []Wallet) (*Source, error) {
privateKeys := make([]*keys.PrivateKey, len(wallets))
var err error
for i, w := range wallets {
if privateKeys[i], err = readPrivateKey(w); err != nil {
return nil, fmt.Errorf("read private key from wallet '%s': %w", w.Path, err)
}
}
return &Source{keys: privateKeys}, nil
}
func (s *Source) Credentials(_ context.Context, pk *keys.PublicKey) (*keys.PrivateKey, error) {
for _, key := range s.keys {
if key.PublicKey().Equal(pk) {
return key, nil
}
}
return nil, errors.New("key not found")
}
func readPrivateKey(walletInfo Wallet) (*keys.PrivateKey, error) {
w, err := wallet.NewWalletFromFile(walletInfo.Path)
if err != nil {
return nil, fmt.Errorf("parse wallet: %w", err)
}
var addr util.Uint160
if walletInfo.Address == "" {
addr = w.GetChangeAddress()
} else {
addr, err = flags.ParseAddress(walletInfo.Address)
if err != nil {
return nil, fmt.Errorf("invalid address")
}
}
acc := w.GetAccount(addr)
if acc == nil {
return nil, fmt.Errorf("couldn't find wallet account for %s", address.Uint160ToString(addr))
}
if err = acc.Decrypt(walletInfo.Passphrase, w.Scrypt); err != nil {
return nil, fmt.Errorf("couldn't decrypt account: %w", err)
}
return acc.PrivateKey(), nil
}
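A hedged usage sketch for this package (the import path is an assumption, since file paths are not shown in this diff; the wallet path and passphrase are placeholders):
```go
// Hedged usage sketch (not part of the repository).
package main

import (
	"context"
	"fmt"

	// Assumed import path; the diff does not show where walletsource lives.
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/credential/walletsource"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
	src, err := walletsource.New([]walletsource.Wallet{{
		Path:       "/path/to/wallet.json",
		Address:    "", // empty: fall back to the wallet's change address
		Passphrase: "",
	}})
	if err != nil {
		panic(err)
	}

	probe, err := keys.NewPrivateKey() // stand-in for a service's key
	if err != nil {
		panic(err)
	}
	if _, err = src.Credentials(context.Background(), probe.PublicKey()); err != nil {
		fmt.Println("lookup failed:", err) // expected: key not found
	}
}
```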

View file

@ -1,66 +0,0 @@
package frostfs
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"go.uber.org/zap"
)
// FrostFS represents virtual connection to the FrostFS network.
// It is used to provide an interface to dependent packages
// which work with FrostFS.
type FrostFS struct {
pool *pool.Pool
log *zap.Logger
}
// NewFrostFS creates new FrostFS using provided pool.Pool.
func NewFrostFS(p *pool.Pool, log *zap.Logger) *FrostFS {
return &FrostFS{
pool: p,
log: log,
}
}
func (f *FrostFS) GetObject(ctx context.Context, addr oid.Address) (pool.ResGetObject, error) {
var prm pool.PrmObjectGet
prm.SetAddress(addr)
addBearer(ctx, &prm)
return f.pool.GetObject(ctx, prm)
}
func (f *FrostFS) DeleteObject(ctx context.Context, addr oid.Address) error {
var prm pool.PrmObjectDelete
prm.SetAddress(addr)
addBearer(ctx, &prm)
return f.pool.DeleteObject(ctx, prm)
}
func (f *FrostFS) NetworkInfo(ctx context.Context) (*netmap.NetworkInfo, error) {
networkInfo, err := f.pool.NetworkInfo(ctx)
if err != nil {
return nil, fmt.Errorf("get network info via client: %w", err)
}
return &networkInfo, nil
}
type WithBearerParam interface {
UseBearer(token bearer.Token)
}
func addBearer(ctx context.Context, prm WithBearerParam) {
if bd, err := middleware.GetBoxData(ctx); err == nil {
if bd.Gate.BearerToken != nil {
prm.UseBearer(*bd.Gate.BearerToken)
}
}
}
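A hedged wiring sketch for this wrapper, written as if it lived in the same package (so it reuses the imports above); it assumes `p` is an already-dialed `*pool.Pool`:
```go
// Hedged sketch (not part of the repository): wiring the FrostFS wrapper.
func wireExample(ctx context.Context, p *pool.Pool, log *zap.Logger) error {
	ffs := NewFrostFS(p, log)

	ni, err := ffs.NetworkInfo(ctx)
	if err != nil {
		return err
	}
	log.Info("fetched network info", zap.Uint64("current epoch", ni.CurrentEpoch()))
	return nil
}
```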

View file

@ -1,273 +0,0 @@
package frostfs
import (
"context"
"errors"
"fmt"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
)
type GetNodeByPathResponseInfoWrapper struct {
response *grpcService.GetNodeByPathResponse_Info
}
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
return []uint64{n.response.GetNodeId()}
}
func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
return []uint64{n.response.GetParentId()}
}
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
return []uint64{n.response.GetTimestamp()}
}
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
type GetSubTreeResponseBodyWrapper struct {
response *grpcService.GetSubTreeResponse_Body
}
func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
return n.response.GetNodeId()
}
func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
resp := n.response.GetParentId()
if resp == nil {
// storage sends nil that should be interpreted as []uint64{0}
// due to protobuf compatibility, see 'GetSubTree' function
return []uint64{0}
}
return resp
}
func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
return n.response.GetTimestamp()
}
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
type TreePoolWrapper struct {
p *treepool.Pool
}
func NewTreePoolWrapper(p *treepool.Pool) *TreePoolWrapper {
return &TreePoolWrapper{p: p}
}
func (w *TreePoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
poolPrm := treepool.GetNodesParams{
CID: prm.BktInfo.CID,
TreeID: prm.TreeID,
Path: prm.Path,
Meta: prm.Meta,
PathAttribute: tree.FileNameKey,
LatestOnly: prm.LatestOnly,
AllAttrs: prm.AllAttrs,
BearerToken: getBearer(ctx, prm.BktInfo),
}
nodes, err := w.p.GetNodes(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
}
res := make([]tree.NodeResponse, len(nodes))
for i, info := range nodes {
res[i] = GetNodeByPathResponseInfoWrapper{info}
}
return res, nil
}
func (w *TreePoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) ([]tree.NodeResponse, error) {
poolPrm := treepool.GetSubTreeParams{
CID: bktInfo.CID,
TreeID: treeID,
RootID: rootID,
Depth: depth,
BearerToken: getBearer(ctx, bktInfo),
Order: treepool.AscendingOrder,
}
if len(rootID) == 1 && rootID[0] == 0 {
// The storage node interprets a 'nil' value as []uint64{0}.
// The gate sends 'nil' instead of []uint64{0} because this keeps
// compatibility with the previous tree service API, where a single
// uint64(0) value was dropped from the signature.
poolPrm.RootID = nil
}
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
}
var subtree []tree.NodeResponse
node, err := subTreeReader.Next()
for err == nil {
subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
node, err = subTreeReader.Next()
}
if err != nil && err != io.EOF {
return nil, handleError(err)
}
return subtree, nil
}
type SubTreeStreamImpl struct {
r *treepool.SubTreeReader
buffer []*grpcService.GetSubTreeResponse_Body
eof bool
index int
ln int
}
const bufSize = 1000
func (s *SubTreeStreamImpl) Next() (tree.NodeResponse, error) {
if s.index != -1 {
node := s.buffer[s.index]
s.index++
if s.index >= s.ln {
s.index = -1
}
return GetSubTreeResponseBodyWrapper{response: node}, nil
}
if s.eof {
return nil, io.EOF
}
var err error
s.ln, err = s.r.Read(s.buffer)
if err != nil {
if err != io.EOF {
return nil, fmt.Errorf("sub tree stream impl pool wrap: %w", handleError(err))
}
s.eof = true
}
if s.ln > 0 {
s.index = 0
}
return s.Next()
}
func (w *TreePoolWrapper) GetSubTreeStream(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) (tree.SubTreeStream, error) {
poolPrm := treepool.GetSubTreeParams{
CID: bktInfo.CID,
TreeID: treeID,
RootID: rootID,
Depth: depth,
BearerToken: getBearer(ctx, bktInfo),
Order: treepool.AscendingOrder,
}
if len(rootID) == 1 && rootID[0] == 0 {
// The storage node interprets a 'nil' value as []uint64{0}.
// The gate sends 'nil' instead of []uint64{0} because this keeps
// compatibility with the previous tree service API, where a single
// uint64(0) value was dropped from the signature.
poolPrm.RootID = nil
}
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
}
return &SubTreeStreamImpl{
r: subTreeReader,
buffer: make([]*grpcService.GetSubTreeResponse_Body, bufSize),
index: -1,
}, nil
}
func (w *TreePoolWrapper) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error) {
nodeID, err := w.p.AddNode(ctx, treepool.AddNodeParams{
CID: bktInfo.CID,
TreeID: treeID,
Parent: parent,
Meta: meta,
BearerToken: getBearer(ctx, bktInfo),
})
return nodeID, handleError(err)
}
func (w *TreePoolWrapper) AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error) {
nodeID, err := w.p.AddNodeByPath(ctx, treepool.AddNodeByPathParams{
CID: bktInfo.CID,
TreeID: treeID,
Path: path,
Meta: meta,
PathAttribute: tree.FileNameKey,
BearerToken: getBearer(ctx, bktInfo),
})
return nodeID, handleError(err)
}
func (w *TreePoolWrapper) MoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error {
return handleError(w.p.MoveNode(ctx, treepool.MoveNodeParams{
CID: bktInfo.CID,
TreeID: treeID,
NodeID: nodeID,
ParentID: parentID,
Meta: meta,
BearerToken: getBearer(ctx, bktInfo),
}))
}
func (w *TreePoolWrapper) RemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
return handleError(w.p.RemoveNode(ctx, treepool.RemoveNodeParams{
CID: bktInfo.CID,
TreeID: treeID,
NodeID: nodeID,
BearerToken: getBearer(ctx, bktInfo),
}))
}
func getBearer(ctx context.Context, bktInfo *data.BucketInfo) []byte {
if bd, err := middleware.GetBoxData(ctx); err == nil {
if bd.Gate.BearerToken != nil {
if bd.Gate.BearerToken.Impersonate() || bktInfo.Owner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
return bd.Gate.BearerToken.Marshal()
}
}
}
return nil
}
func handleError(err error) error {
if err == nil {
return nil
}
if errors.Is(err, treepool.ErrNodeNotFound) {
return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
}
if errors.Is(err, treepool.ErrNodeAccessDenied) {
return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
}
return err
}

View file

@ -1,37 +0,0 @@
package lifecycle
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
)
type UserFetcher interface {
Users() ([]util.Uint160, error)
UserKey(hash util.Uint160) (*keys.PublicKey, error)
}
type ContainerFetcher interface {
Containers(owner user.ID) ([]cid.ID, error)
}
type TreeFetcher interface {
GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error)
}
type FrostFSFetcher interface {
GetObject(ctx context.Context, addr oid.Address) (pool.ResGetObject, error)
NetworkInfo(ctx context.Context) (*netmap.NetworkInfo, error)
DeleteObject(ctx context.Context, addr oid.Address) error
}
type CredentialSource interface {
Credentials(ctx context.Context, pk *keys.PublicKey) (*keys.PrivateKey, error)
}
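For tests, a minimal in-memory `CredentialSource` might look like the following sketch (not part of the repository):
```go
// Sketch: an in-memory CredentialSource serving a single fixed key,
// useful as a test stub.
package lifecycle_test

import (
	"context"
	"errors"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

type staticCredentialSource struct {
	key *keys.PrivateKey
}

func (s *staticCredentialSource) Credentials(_ context.Context, pk *keys.PublicKey) (*keys.PrivateKey, error) {
	if s.key.PublicKey().Equal(pk) {
		return s.key, nil
	}
	return nil, errors.New("key not found")
}
```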

View file

@ -1,785 +0,0 @@
package lifecycle
import (
"context"
"crypto/ecdsa"
"crypto/rand"
"errors"
"fmt"
"io"
"math"
"sort"
"strconv"
"strings"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
// nowTime is a function for getting the current time. It can be overridden
// in tests to mock out the current time.
var nowTime = time.Now
type Executor struct {
log *zap.Logger
jobs <-chan Job
pool *ants.Pool
tree *tree.Tree
frostfs FrostFSFetcher
done chan struct{}
}
type ExecutorConfig struct {
Logger *zap.Logger
Jobs <-chan Job
WorkerPoolSize int
TreeFetcher *tree.Tree
FrostFSFetcher FrostFSFetcher
}
type logWrapper struct {
log *zap.Logger
}
func (l *logWrapper) Printf(format string, args ...interface{}) {
l.log.Info(fmt.Sprintf(format, args...))
}
const (
statusDisabled = "Disabled"
)
func NewExecutor(ctx context.Context, cfg ExecutorConfig) (*Executor, error) {
e := &Executor{
log: cfg.Logger,
jobs: cfg.Jobs,
tree: cfg.TreeFetcher,
frostfs: cfg.FrostFSFetcher,
done: make(chan struct{}),
}
var err error
e.pool, err = ants.NewPool(cfg.WorkerPoolSize, ants.WithLogger(&logWrapper{cfg.Logger}))
if err != nil {
return nil, fmt.Errorf("coudln't init worker pool: %w", err)
}
go e.workerRoutine(ctx)
return e, nil
}
func (e *Executor) Done() {
<-e.done
}
func (e *Executor) workerRoutine(ctx context.Context) {
var (
wg sync.WaitGroup
err error
)
LOOP:
for {
select {
case <-ctx.Done():
e.log.Info(logs.ExecutorStopped, zap.Error(ctx.Err()))
break LOOP
case job, ok := <-e.jobs:
if !ok {
e.log.Info(logs.ExecutorStoppedJobsChannelIsClosed)
break LOOP
}
wg.Add(1)
err = e.pool.Submit(func() {
defer wg.Done()
if inErr := e.worker(ctx, job); inErr != nil {
e.log.Warn(logs.WorkerFailedToHandleJob, zap.Uint64("epoch", job.Epoch),
zap.String("cid", job.ContainerID.EncodeToString()), zap.Error(inErr))
}
})
if err != nil {
wg.Done()
e.log.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
}
}
}
wg.Wait()
e.pool.Release()
close(e.done)
}
func (e *Executor) worker(ctx context.Context, job Job) error {
e.log.Debug(logs.ProcessingJob, zap.String("user", job.PrivateKey.Address()), zap.String("cid", job.ContainerID.EncodeToString()))
ctx = addBearerToContext(ctx, job.Bearer)
var userID user.ID
user.IDFromKey(&userID, (ecdsa.PublicKey)(*job.PrivateKey.PublicKey()))
bktInfo := &data.BucketInfo{
CID: job.ContainerID,
Owner: userID,
}
settings, err := e.tree.GetSettingsNode(ctx, bktInfo)
if err != nil {
return fmt.Errorf("get settings node: %w", err)
}
ni, err := e.frostfs.NetworkInfo(ctx)
if err != nil {
return fmt.Errorf("get network info: %w", err)
}
if ni.CurrentEpoch() != job.Epoch {
e.log.Warn(logs.EpochMismatched, zap.Uint64("job epoch", job.Epoch), zap.Uint64("network info epoch", ni.CurrentEpoch()))
ni.SetCurrentEpoch(job.Epoch)
}
if err = e.abortMultiparts(ctx, ni, job.LifecycleConfiguration.Rules, bktInfo); err != nil {
e.log.Warn(logs.AbortMultipartUploads, zap.Uint64("epoch", job.Epoch),
zap.Stringer("cid", job.ContainerID), zap.Error(err))
}
if err = e.expireObjects(ctx, ni, job.LifecycleConfiguration.Rules, bktInfo, settings); err != nil {
e.log.Warn(logs.ExpireObjects, zap.Uint64("epoch", job.Epoch),
zap.Stringer("cid", job.ContainerID), zap.Error(err))
}
return nil
}
const creationEpochKV = "CreationEpoch"
func (e *Executor) abortMultiparts(ctx context.Context, ni *netmap.NetworkInfo, rules []data.LifecycleRule, bktInfo *data.BucketInfo) error {
if len(rules) == 0 {
return nil
}
matcherFn, err := e.matchMultipartByRulesFn(ni, rules)
if err != nil {
return fmt.Errorf("form multiaprt matcher: %w", err)
}
multiparts, err := e.tree.GetMultipartUploadsByPrefix(ctx, bktInfo, "")
if err != nil {
return fmt.Errorf("list multiparts: %w", err)
}
for _, multipart := range multiparts {
e.log.Debug(logs.TagMultipartProcessUpload, zap.String("key", multipart.Key), zap.String("upload_id", multipart.UploadID), zap.Uint64("node_id", multipart.ID))
if !matcherFn(multipart) {
continue
}
if err = e.abortMultipart(ctx, bktInfo, multipart); err != nil {
e.log.Warn(logs.FailedToAbortMultipart, zap.String("key", multipart.Key),
zap.String("upload_id", multipart.UploadID), zap.Error(err))
continue
}
}
return nil
}
func (e *Executor) matchMultipartByRulesFn(ni *netmap.NetworkInfo, rules []data.LifecycleRule) (func(*data.MultipartInfo) bool, error) {
matchers := make([]func(*data.MultipartInfo) bool, 0, len(rules))
for _, rule := range rules {
matchFn, err := e.matchMultipartByRuleFn(ni, rule)
if err != nil {
if errors.Is(err, errNotApplicableRule) {
continue
}
return nil, err
}
matchers = append(matchers, matchFn)
}
if len(matchers) == 0 {
return nil, errNotApplicableRule
}
return func(info *data.MultipartInfo) bool {
for _, matcher := range matchers {
if matcher(info) {
return true
}
}
return false
}, nil
}
var (
errNotApplicableRule = errors.New("not applicable rule")
errAllObjectMatcherFailed = errors.New("all object matcher failed")
)
func (e *Executor) matchMultipartByRuleFn(ni *netmap.NetworkInfo, rule data.LifecycleRule) (func(*data.MultipartInfo) bool, error) {
if rule.Status == statusDisabled {
return nil, errNotApplicableRule
}
if rule.AbortIncompleteMultipartUpload == nil || rule.AbortIncompleteMultipartUpload.DaysAfterInitiation == nil {
return nil, errNotApplicableRule
}
multipartDuration, err := durationToEpochsAbs(ni, 24*time.Hour*time.Duration(*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation))
if err != nil {
return nil, fmt.Errorf("DaysAfterInitiation to epochs: %w", err)
}
var prefix string
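// tag matching for multiparts is currently a no-op (always true); only the prefix filter narrows the match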
matchMultipartByTags := func(*data.MultipartInfo) bool { return true }
if rule.Filter != nil {
filter := rule.Filter
prefix = filter.Prefix
if filter.And != nil {
prefix = filter.And.Prefix
}
}
return func(multipart *data.MultipartInfo) bool {
if multipart.Finished || !strings.HasPrefix(multipart.Key, prefix) || !matchMultipartByTags(multipart) {
return false
}
multipartCreationEpoch, err := creationEpoch(ni, multipart.Created, multipart.Meta)
if err != nil {
e.log.Warn(logs.FailedToGetMultipartCreationEpoch, zap.Error(err))
return false
}
return multipartCreationEpoch+multipartDuration <= ni.CurrentEpoch()
}, nil
}
func (e *Executor) expireObjects(ctx context.Context, ni *netmap.NetworkInfo, rules []data.LifecycleRule, bktInfo *data.BucketInfo, settings *data.BucketSettings) error {
if len(rules) == 0 {
return nil
}
matcherFn, err := e.matchObjectByRulesFn(ctx, ni, bktInfo, rules)
if err != nil {
return fmt.Errorf("form multiaprt matcher: %w", err)
}
objectStream, err := e.tree.InitVersionsByPrefixStream(ctx, bktInfo, "", false)
if err != nil {
return fmt.Errorf("list versions: %w", err)
}
var (
latestObjName string
versions []*data.NodeVersion
)
for {
nodeVersion, err := objectStream.Next(ctx)
if err != nil {
if errors.Is(err, io.EOF) {
if err = e.expireObject(ctx, versions, bktInfo, ni, matcherFn, settings); err != nil {
e.log.Warn(logs.FailedToExpireObject, zap.String("object", latestObjName), zap.Error(err))
}
return nil
}
return fmt.Errorf("get node version from stream: %w", err)
}
e.log.Debug(logs.TagExpirationProcessVersion, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", nodeVersion.OID),
zap.String("filepath", nodeVersion.FilePath), zap.Uint64("node_id", nodeVersion.ID))
if nodeVersion.FilePath != latestObjName {
if err = e.expireObject(ctx, versions, bktInfo, ni, matcherFn, settings); err != nil {
e.log.Warn(logs.FailedToExpireObject, zap.String("object", latestObjName), zap.Error(err))
}
latestObjName = nodeVersion.FilePath
versions = versions[:0]
}
versions = append(versions, nodeVersion)
}
}
type MatchObjectFunc = func(index int, versions []*data.NodeVersion) (bool, error)
func (e *Executor) matchObjectByRulesFn(ctx context.Context, ni *netmap.NetworkInfo, bktInfo *data.BucketInfo, rules []data.LifecycleRule) (MatchObjectFunc, error) {
matchers := make([]MatchObjectFunc, 0, len(rules))
for _, rule := range rules {
matchFn, err := e.expiredObjectMatcher(ctx, bktInfo, ni, rule)
if err != nil {
e.log.Warn(logs.SkipRule, zap.String("rule", rule.ID), zap.Error(err))
continue
}
matchers = append(matchers, matchFn)
}
if len(matchers) == 0 {
return nil, errNotApplicableRule
}
return func(index int, versions []*data.NodeVersion) (bool, error) {
var numErrors int
for _, matcher := range matchers {
if matched, err := matcher(index, versions); err != nil {
e.log.Warn(logs.ObjectMatchingFailed, zap.Error(err))
numErrors++
continue
} else if matched {
return true, nil
}
}
var err error
if numErrors == len(matchers) {
err = errAllObjectMatcherFailed
}
return false, err
}, nil
}
func (e *Executor) expireObject(ctx context.Context, versions []*data.NodeVersion, bktInfo *data.BucketInfo, ni *netmap.NetworkInfo, matcherFn MatchObjectFunc, settings *data.BucketSettings) error {
if len(versions) == 0 {
return nil
}
sort.Slice(versions, func(i, j int) bool {
return versions[i].Timestamp < versions[j].Timestamp
})
var nullVersionsToDelete []int
for i, version := range versions {
if version.IsUnversioned {
nullVersionsToDelete = append(nullVersionsToDelete, i)
}
matched, err := matcherFn(i, versions)
if err != nil {
return err
}
if !matched {
continue
}
if i != len(versions)-1 { // non-current version
e.deleteObject(ctx, version, bktInfo)
if version.IsUnversioned {
nullVersionsToDelete = nullVersionsToDelete[:len(nullVersionsToDelete)-1]
}
continue
}
switch {
case settings.Unversioned():
e.deleteObject(ctx, version, bktInfo)
case settings.VersioningEnabled():
if version.IsDeleteMarker && len(versions) == 1 { // remove expired object delete marker
e.removeVersion(ctx, version, bktInfo)
} else if !version.IsDeleteMarker {
e.addDeleteMarker(ctx, version, ni, bktInfo, settings)
}
default:
for _, index := range nullVersionsToDelete {
if versions[index].ID == version.ID && version.IsDeleteMarker {
continue
}
e.deleteObject(ctx, versions[index], bktInfo)
}
if !version.IsDeleteMarker {
e.addDeleteMarker(ctx, version, ni, bktInfo, settings)
}
}
}
return nil
}
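// In short, the current version is handled by bucket mode: unversioned buckets delete
// the object outright; versioning-enabled buckets drop an expired delete marker when it
// is the only remaining version, or add a new delete marker otherwise; suspended buckets
// (the default branch) delete the accumulated null versions and then, unless the current
// version is already a delete marker, add an unversioned delete marker.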
func (e *Executor) expiredObjectMatcher(ctx context.Context, bktInfo *data.BucketInfo, ni *netmap.NetworkInfo, rule data.LifecycleRule) (func(int, []*data.NodeVersion) (bool, error), error) {
if rule.Status == statusDisabled {
return nil, fmt.Errorf("%w: rule disabled", errNotApplicableRule)
}
if rule.Expiration == nil && rule.NonCurrentVersionExpiration == nil {
return nil, fmt.Errorf("%w: missing expiration and non current expiration", errNotApplicableRule)
}
var prm objectMatcherParams
prm.newerNonCurrentVersions = math.MaxInt
prm.minObjSize, prm.maxObjSize = getObjectSizeRange(rule)
if rule.NonCurrentVersionExpiration != nil {
if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil {
prm.newerNonCurrentVersions = *rule.NonCurrentVersionExpiration.NewerNonCurrentVersions
}
if rule.NonCurrentVersionExpiration.NonCurrentDays != nil {
nonCurrentDuration, err := durationToEpochsAbs(ni, 24*time.Hour*time.Duration(*rule.NonCurrentVersionExpiration.NonCurrentDays))
if err != nil {
return nil, fmt.Errorf("NonCurrentDays to epochs: %w", err)
}
prm.nonCurrentDuration = &nonCurrentDuration
}
}
if rule.Expiration != nil {
if rule.Expiration.Date != "" {
dateToExpire, err := time.Parse(time.RFC3339, rule.Expiration.Date)
if err != nil {
return nil, fmt.Errorf("invalid expiration date '%s': %w", rule.Expiration.Date, err)
}
expirationEpoch, err := timeToEpoch(ni, dateToExpire)
if err != nil {
return nil, fmt.Errorf("expiration date to epoch: %w", err)
}
prm.expirationEpoch = &expirationEpoch
}
if rule.Expiration.Days != nil {
expirationDurationEpochs, err := durationToEpochsAbs(ni, 24*time.Hour*time.Duration(*rule.Expiration.Days))
if err != nil {
return nil, fmt.Errorf("Expiration.Days to epochs: %w", err)
}
prm.expirationDurationEpochs = &expirationDurationEpochs
}
prm.expiredObjectDeleteMarker = rule.Expiration.ExpiredObjectDeleteMarker != nil && *rule.Expiration.ExpiredObjectDeleteMarker
}
if rule.Filter != nil {
prm.prefix = rule.Filter.Prefix
if rule.Filter.And != nil {
prm.prefix = rule.Filter.And.Prefix
}
if rule.Filter.Tag != nil {
prm.tagsToMatch = append(prm.tagsToMatch, *rule.Filter.Tag)
}
if rule.Filter.And != nil {
prm.tagsToMatch = append(prm.tagsToMatch, rule.Filter.And.Tags...)
}
}
return e.objectMatcher(ctx, bktInfo, ni, prm), nil
}
type objectMatcherParams struct {
prefix string
newerNonCurrentVersions int
minObjSize uint64
maxObjSize uint64
nonCurrentDuration *uint64
expiredObjectDeleteMarker bool
expirationEpoch *uint64
expirationDurationEpochs *uint64
tagsToMatch []data.Tag
}
func (e *Executor) objectMatcher(ctx context.Context, bktInfo *data.BucketInfo, ni *netmap.NetworkInfo, prm objectMatcherParams) func(int, []*data.NodeVersion) (bool, error) {
return func(index int, versions []*data.NodeVersion) (bool, error) {
if !strings.HasPrefix(versions[index].FilePath, prm.prefix) {
return false, nil
}
if index < len(versions)-prm.newerNonCurrentVersions-1 {
return true, nil
}
version := versions[index]
if prm.nonCurrentDuration != nil {
if index < len(versions)-1 {
next := versions[index+1]
epoch, err := versionCreationEpoch(next, ni)
if err != nil {
return false, err
}
if epoch+(*prm.nonCurrentDuration) < ni.CurrentEpoch() {
return true, nil
}
}
}
// the following checks apply only to the current version,
// so we skip all non-current ones
if index != len(versions)-1 {
return false, nil
}
// remove expired delete marker unconditionally
if version.IsDeleteMarker && len(versions) == 1 && prm.expiredObjectDeleteMarker {
return true, nil
}
if prm.expirationEpoch == nil && prm.expirationDurationEpochs == nil {
return false, nil
}
versionEpoch, err := versionCreationEpoch(version, ni)
if err != nil {
return false, err
}
var expirationEpoch uint64
switch {
case prm.expirationEpoch == nil:
expirationEpoch = versionEpoch + *prm.expirationDurationEpochs
case prm.expirationDurationEpochs == nil:
expirationEpoch = *prm.expirationEpoch
default:
expirationEpoch = min(*prm.expirationEpoch, versionEpoch+*prm.expirationDurationEpochs)
}
if ni.CurrentEpoch() < expirationEpoch {
return false, nil
}
if version.IsDeleteMarker && len(versions) == 1 { // remove expired delete marker under matching all conditions
return true, nil
}
if version.Size < prm.minObjSize || version.Size > prm.maxObjSize {
return false, nil
}
if len(prm.tagsToMatch) == 0 {
return true, nil
}
tags, err := e.tree.GetObjectTagging(ctx, bktInfo, version)
if err != nil {
return false, fmt.Errorf("get object tags from tree: %w", err)
}
for _, tag := range prm.tagsToMatch {
if tags[tag.Key] != tag.Value {
return false, nil
}
}
return true, nil
}
}
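// For intuition, the count-based check above expires older versions regardless of age:
// with versions sorted oldest-to-newest, len(versions) == 10 and NewerNonCurrentVersions == 3
// (hypothetical values), indexes 0..5 satisfy index < 10-3-1 and expire, keeping the current
// version plus the 3 newest non-current ones (see TestExecutorFilterNewerNoncurrent).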
func (e *Executor) deleteObject(ctx context.Context, version *data.NodeVersion, bktInfo *data.BucketInfo) {
if !version.IsDeleteMarker {
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(version.OID)
e.log.Debug(logs.TagExpirationDeleteObject, zap.Stringer("address", addr), zap.String("filepath", version.FilePath))
if err := e.frostfs.DeleteObject(ctx, addr); err != nil && !isNotFound(err) {
e.log.Warn(logs.DeleteObjectVersionFromStorage, zap.String("key", version.FilePath),
zap.String("address", addr.EncodeToString()), zap.Error(err))
return
}
}
e.removeVersion(ctx, version, bktInfo)
}
func (e *Executor) addDeleteMarker(ctx context.Context, version *data.NodeVersion, ni *netmap.NetworkInfo, bktInfo *data.BucketInfo, settings *data.BucketSettings) {
randOID, err := getRandomOID()
if err != nil {
e.log.Warn(logs.FailedToGenerateRandomIDForDeleteMarker, zap.Error(err))
return
}
e.log.Debug(logs.AddDeleteMarker, zap.String("cid", bktInfo.CID.EncodeToString()), zap.String("oid", randOID.EncodeToString()),
zap.String("filepath", version.FilePath))
now := nowTime()
newVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
OID: randOID,
FilePath: version.FilePath,
Created: &now,
Owner: &bktInfo.Owner,
IsDeleteMarker: true,
CreationEpoch: ni.CurrentEpoch(),
},
IsUnversioned: settings.VersioningSuspended(),
}
if _, err = e.tree.AddVersion(ctx, bktInfo, newVersion); err != nil {
e.log.Warn(logs.AddDeleteMarker, zap.Error(err))
return
}
}
func (e *Executor) removeVersion(ctx context.Context, version *data.NodeVersion, bktInfo *data.BucketInfo) {
e.log.Debug(logs.TagExpirationRemoveVersion, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", version.OID),
zap.String("filepath", version.FilePath), zap.Uint64("node_id", version.ID))
if err := e.tree.RemoveVersion(ctx, bktInfo, version.ID); err != nil {
e.log.Warn(logs.DeleteObjectVersionFromTree, zap.String("key", version.FilePath),
zap.Uint64("id", version.ID), zap.Error(err))
}
}
func getRandomOID() (oid.ID, error) {
b := [32]byte{}
if _, err := rand.Read(b[:]); err != nil {
return oid.ID{}, err
}
var objID oid.ID
objID.SetSHA256(b)
return objID, nil
}
func getObjectSizeRange(rule data.LifecycleRule) (uint64, uint64) {
minObjSize := uint64(0)
maxObjSize := uint64(math.MaxUint64)
if rule.Filter == nil {
return minObjSize, maxObjSize
}
if rule.Filter.ObjectSizeGreaterThan != nil {
minObjSize = *rule.Filter.ObjectSizeGreaterThan
if minObjSize != math.MaxUint64 {
minObjSize++
}
} else if rule.Filter.And != nil && rule.Filter.And.ObjectSizeGreaterThan != nil {
minObjSize = *rule.Filter.And.ObjectSizeGreaterThan
if minObjSize != math.MaxUint64 {
minObjSize++
}
}
if rule.Filter.ObjectSizeLessThan != nil {
maxObjSize = *rule.Filter.ObjectSizeLessThan
if maxObjSize != 0 {
maxObjSize--
}
} else if rule.Filter.And != nil && rule.Filter.And.ObjectSizeLessThan != nil {
maxObjSize = *rule.Filter.And.ObjectSizeLessThan
if maxObjSize != 0 {
maxObjSize--
}
}
return minObjSize, maxObjSize
}
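// The S3 size filters are exclusive bounds, so the helper above converts them to an
// inclusive [min, max] range. A minimal sketch with hypothetical values:
//
//	// ObjectSizeGreaterThan = 25, ObjectSizeLessThan = 75
//	// => minObjSize = 26, maxObjSize = 74; objectMatcher keeps 26 <= version.Size <= 74
//	// (cf. the size rule in TestExecutorMultipleRules, which expires only the 50-byte obj2)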
func isNotFound(err error) bool {
return client.IsErrObjectAlreadyRemoved(err) || client.IsErrObjectNotFound(err)
}
func versionCreationEpoch(version *data.NodeVersion, ni *netmap.NetworkInfo) (uint64, error) {
objCreationEpoch := version.CreationEpoch
if objCreationEpoch == 0 {
created := nowTime()
if version.Created != nil {
created = *version.Created
}
var err error
if objCreationEpoch, err = timeToEpoch(ni, created); err != nil {
return 0, fmt.Errorf("time to epoch: %w", err)
}
}
return objCreationEpoch, nil
}
func (e *Executor) abortMultipart(ctx context.Context, bktInfo *data.BucketInfo, multipart *data.MultipartInfo) error {
e.log.Debug(logs.AbortMultipart, zap.String("cid", bktInfo.CID.EncodeToString()), zap.String("key", multipart.Key),
zap.String("upload_id", multipart.UploadID), zap.Uint64("node_id", multipart.ID))
parts, err := e.tree.GetParts(ctx, bktInfo, multipart.ID)
if err != nil {
return fmt.Errorf("get parts: %w", err)
}
var addr oid.Address
addr.SetContainer(bktInfo.CID)
for _, part := range parts {
addr.SetObject(part.OID)
e.log.Debug(logs.TagMultipartDeleteObject, zap.Stringer("address", addr), zap.Int("part", part.Number))
if err = e.frostfs.DeleteObject(ctx, addr); err != nil {
return fmt.Errorf("delete object '%s': %w", addr.EncodeToString(), err)
}
}
if err = e.tree.DeleteMultipartUpload(ctx, bktInfo, multipart); err != nil {
return fmt.Errorf("delete multipart '%d': %w", multipart.ID, err)
}
return nil
}
func creationEpoch(ni *netmap.NetworkInfo, created time.Time, meta map[string]string) (creationEpoch uint64, err error) {
createdEpochStr := meta[creationEpochKV]
if createdEpochStr != "" {
if creationEpoch, err = strconv.ParseUint(createdEpochStr, 10, 64); err != nil {
return 0, fmt.Errorf("invalid creation epoch '%s': %w", createdEpochStr, err)
}
} else {
if creationEpoch, err = timeToEpoch(ni, created); err != nil {
return 0, fmt.Errorf("time to epoch: %w", err)
}
}
return creationEpoch, nil
}
func timeToEpoch(ni *netmap.NetworkInfo, timeToConvert time.Time) (uint64, error) {
dur := timeToConvert.Sub(nowTime())
epochLifetime, err := durationToEpochsAbs(ni, dur)
if err != nil {
return 0, err
}
curr := ni.CurrentEpoch()
var epoch uint64
if dur > 0 {
if epochLifetime >= math.MaxUint64-curr {
epoch = math.MaxUint64
} else {
epoch = curr + epochLifetime
}
} else {
if epochLifetime >= curr {
epoch = 0
} else {
epoch = curr - epochLifetime
}
}
return epoch, nil
}
func durationToEpochsAbs(ni *netmap.NetworkInfo, duration time.Duration) (uint64, error) {
duration = duration.Abs()
durEpoch := ni.EpochDuration()
if durEpoch == 0 {
return 0, errors.New("epoch duration is missing or zero")
}
msPerEpoch := durEpoch * uint64(ni.MsPerBlock())
epochLifetime := uint64(duration.Milliseconds()) / msPerEpoch
if uint64(duration.Milliseconds())%msPerEpoch != 0 {
epochLifetime++
}
return epochLifetime, nil
}
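// For intuition, with the network parameters used in the tests (an epoch duration of
// 3600 blocks at 1000 ms per block, i.e. one epoch per hour), conversions round up.
// A minimal sketch with hypothetical values:
//
//	var ni netmap.NetworkInfo
//	ni.SetEpochDuration(3600)
//	ni.SetMsPerBlock(1000)
//	epochs, _ := durationToEpochsAbs(&ni, 90*time.Minute) // epochs == 2: 5_400_000 ms rounds up to two 3_600_000 ms epochs
//
// timeToEpoch above builds on this: it clamps to 0 for dates far enough in the past and
// to math.MaxUint64 on overflow for dates far in the future.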

View file

@ -1,494 +0,0 @@
package lifecycle
import (
"context"
"strconv"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type executorContext struct {
ctx context.Context
cancel context.CancelFunc
log *zap.Logger
key *keys.PrivateKey
ffs *frostfsFetcherMock
tree *tree.Tree
jobs chan Job
executor *Executor
}
func newExecutorContext(t *testing.T) *executorContext {
log := zaptest.NewLogger(t)
ec, err := newExecutorContextBase(log)
require.NoError(t, err)
return ec
}
func newExecutorContextBase(log *zap.Logger) (*executorContext, error) {
ec := &executorContext{
log: log,
ffs: newFrostFSFetcherMock(),
jobs: make(chan Job),
}
var err error
ec.key, err = keys.NewPrivateKey()
if err != nil {
return nil, err
}
memTreeCli, err := tree.NewTreeServiceClientMemory()
if err != nil {
return nil, err
}
ec.tree = tree.NewTree(memTreeCli, log)
ec.ctx, ec.cancel = context.WithCancel(context.Background())
cfg := ExecutorConfig{
Logger: ec.log,
Jobs: ec.jobs,
WorkerPoolSize: 1,
TreeFetcher: ec.tree,
FrostFSFetcher: ec.ffs,
}
if ec.executor, err = NewExecutor(ec.ctx, cfg); err != nil {
return nil, err
}
return ec, nil
}
func (e *executorContext) owner() user.ID {
var owner user.ID
user.IDFromKey(&owner, e.key.PrivateKey.PublicKey)
return owner
}
func (e *executorContext) close() {
close(e.jobs)
e.executor.Done()
}
func (e *executorContext) createBktInfo(versioningStatus string) (*data.BucketInfo, error) {
cnrID := cidtest.ID()
bktInfo := &data.BucketInfo{
CID: cnrID,
Owner: e.owner(),
}
err := e.tree.PutSettingsNode(e.ctx, bktInfo, &data.BucketSettings{Versioning: versioningStatus})
if err != nil {
return nil, err
}
return bktInfo, nil
}
func (e *executorContext) addObject(bktInfo *data.BucketInfo, name string, size int, tags map[string]string) (oid.Address, error) {
var objAddr oid.Address
objAddr.SetContainer(bktInfo.CID)
objAddr.SetObject(oidtest.ID())
obj := object.New()
obj.SetContainerID(objAddr.Container())
obj.SetID(objAddr.Object())
e.ffs.setObject(objAddr, obj)
content := "content"
buf := make([]byte, size)
for i := 0; i < len(buf); i++ {
buf[i] = content[i%len(content)]
}
obj.SetPayload(buf)
settings, err := e.tree.GetSettingsNode(e.ctx, bktInfo)
if err != nil {
return oid.Address{}, err
}
ni, err := e.ffs.NetworkInfo(e.ctx)
if err != nil {
return oid.Address{}, err
}
now := time.Now()
nodeVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
OID: objAddr.Object(),
FilePath: name,
Owner: &bktInfo.Owner,
Created: &now,
Size: uint64(size),
CreationEpoch: ni.CurrentEpoch(),
},
IsUnversioned: !settings.VersioningEnabled(),
}
id, err := e.tree.AddVersion(context.TODO(), bktInfo, nodeVersion)
if err != nil {
return oid.Address{}, err
}
nodeVersion.ID = id
if err = e.tree.PutObjectTagging(context.TODO(), bktInfo, nodeVersion, tags); err != nil {
return oid.Address{}, err
}
return objAddr, nil
}
func TestExecutorFilterPrefix(t *testing.T) {
ec := newExecutorContext(t)
defer ec.cancel()
bktInfo, err := ec.createBktInfo(data.VersioningUnversioned)
require.NoError(t, err)
objAddr1, err := ec.addObject(bktInfo, "obj", 10, nil)
require.NoError(t, err)
objAddr2, err := ec.addObject(bktInfo, "tmp/obj", 10, nil)
require.NoError(t, err)
lifecycleCfg := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Date: "2024-01-24T12:19:33Z"},
Filter: &data.LifecycleRuleFilter{Prefix: "tmp"},
ID: "test",
}}}
ec.jobs <- Job{
ContainerID: bktInfo.CID,
PrivateKey: ec.key,
LifecycleConfiguration: lifecycleCfg,
Epoch: 1,
}
ec.close()
_, err = ec.ffs.GetObject(ec.ctx, objAddr1)
require.NoError(t, err)
_, err = ec.ffs.GetObject(ec.ctx, objAddr2)
require.Truef(t, client.IsErrObjectNotFound(err), "expected not found error, got: %v", err)
}
func TestExecutorFilterNewerNoncurrent(t *testing.T) {
ec := newExecutorContext(t)
defer ec.cancel()
bktInfo, err := ec.createBktInfo(data.VersioningEnabled)
require.NoError(t, err)
ln := 10
addresses := make([]oid.Address, ln)
for i := 0; i < ln; i++ {
addresses[i], err = ec.addObject(bktInfo, "obj", i, nil)
require.NoError(t, err)
}
maxNonCurrent := 3
lifecycleCfg := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{{
Status: "Enabled",
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{NewerNonCurrentVersions: &maxNonCurrent},
ID: "test",
}}}
ec.jobs <- Job{
ContainerID: bktInfo.CID,
PrivateKey: ec.key,
LifecycleConfiguration: lifecycleCfg,
Epoch: 1,
}
ec.close()
for i, addr := range addresses {
_, err = ec.ffs.GetObject(ec.ctx, addr)
if i < len(addresses)-maxNonCurrent-1 {
require.Truef(t, client.IsErrObjectNotFound(err), "expected not found error, got: %v", err)
} else {
require.NoError(t, err)
}
}
versions, err := ec.tree.GetVersions(ec.ctx, bktInfo, "obj")
require.NoError(t, err)
require.Len(t, versions, maxNonCurrent+1)
}
func TestExecutorFilterNoncurrent(t *testing.T) {
ec := newExecutorContext(t)
defer ec.cancel()
bktInfo, err := ec.createBktInfo(data.VersioningEnabled)
require.NoError(t, err)
addr1, err := ec.addObject(bktInfo, "obj", 0, nil)
require.NoError(t, err)
ec.ffs.setEpoch(2)
addr2, err := ec.addObject(bktInfo, "obj", 0, nil)
require.NoError(t, err)
lifecycleCfg := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{{
Status: "Enabled",
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{NonCurrentDays: ptrInt(1)},
ID: "test",
}}}
ec.jobs <- Job{
ContainerID: bktInfo.CID,
PrivateKey: ec.key,
LifecycleConfiguration: lifecycleCfg,
Epoch: 30, // epoch duration is 1h, so we set an epoch that is certainly more than 24h later
}
ec.close()
_, err = ec.ffs.GetObject(ec.ctx, addr1)
require.Truef(t, client.IsErrObjectNotFound(err), "expected not found error, got: %v", err)
_, err = ec.ffs.GetObject(ec.ctx, addr2)
require.NoError(t, err)
}
func TestExecutorMultipleRules(t *testing.T) {
ec := newExecutorContext(t)
defer ec.cancel()
bktInfo, err := ec.createBktInfo(data.VersioningUnversioned)
require.NoError(t, err)
ln := 5
addresses := make([]oid.Address, ln)
addresses[0], err = ec.addObject(bktInfo, "obj0", 0, nil)
require.NoError(t, err)
addresses[1], err = ec.addObject(bktInfo, "obj1", 100, nil)
require.NoError(t, err)
addresses[2], err = ec.addObject(bktInfo, "obj2", 50, nil)
require.NoError(t, err)
addresses[3], err = ec.addObject(bktInfo, "obj3", 0, map[string]string{"tag1": "val1"})
require.NoError(t, err)
addresses[4], err = ec.addObject(bktInfo, "tmp1", 0, nil)
require.NoError(t, err)
lifecycleCfg := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Date: "2024-01-24T12:19:33Z"},
Filter: &data.LifecycleRuleFilter{Prefix: "tmp1"},
ID: "for tmp1/obj",
},
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Date: "2024-01-24T12:19:33Z"},
Filter: &data.LifecycleRuleFilter{ObjectSizeGreaterThan: ptrUint64(25), ObjectSizeLessThan: ptrUint64(75)},
ID: "for obj1",
},
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Date: "2024-01-24T12:19:33Z"},
Filter: &data.LifecycleRuleFilter{Tag: &data.Tag{Key: "tag1", Value: "val1"}},
ID: "for obj3",
},
}}
ec.jobs <- Job{
ContainerID: bktInfo.CID,
PrivateKey: ec.key,
LifecycleConfiguration: lifecycleCfg,
Epoch: 1,
}
ec.close()
for i, addr := range addresses {
_, err = ec.ffs.GetObject(ec.ctx, addr)
if i == 0 || i == 1 {
require.NoError(t, err)
} else {
require.Truef(t, client.IsErrObjectNotFound(err), "expected not found error, got: %v (obj %d)", err, i)
}
}
}
func TestExecutorMultipleRulesMultiparts(t *testing.T) {
ec := newExecutorContext(t)
defer ec.cancel()
bktInfo, err := ec.createBktInfo(data.VersioningUnversioned)
require.NoError(t, err)
err = ec.tree.CreateMultipartUpload(ec.ctx, bktInfo, &data.MultipartInfo{Key: "obj1", UploadID: "upload1", Meta: map[string]string{}})
require.NoError(t, err)
err = ec.tree.CreateMultipartUpload(ec.ctx, bktInfo, &data.MultipartInfo{Key: "obj2", UploadID: "upload2", Meta: map[string]string{}})
require.NoError(t, err)
lifecycleCfg := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{
{
Status: "Disabled",
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{DaysAfterInitiation: ptrInt(1)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj1"},
ID: "for obj1",
},
{
Status: "Enabled",
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{DaysAfterInitiation: ptrInt(1)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj2"},
ID: "for obj2",
},
}}
ec.jobs <- Job{
ContainerID: bktInfo.CID,
PrivateKey: ec.key,
LifecycleConfiguration: lifecycleCfg,
Epoch: 50,
}
ec.close()
multiparts, err := ec.tree.GetMultipartUploadsByPrefix(ec.ctx, bktInfo, "")
require.NoError(t, err)
require.Len(t, multiparts, 1)
require.Equal(t, multiparts[0].Key, "obj1")
}
func TestExecutorAbortMultipartsInDays(t *testing.T) {
ec := newExecutorContext(t)
defer ec.cancel()
bktInfo, err := ec.createBktInfo(data.VersioningUnversioned)
require.NoError(t, err)
err = ec.tree.CreateMultipartUpload(ec.ctx, bktInfo, &data.MultipartInfo{Key: "obj1", UploadID: "upload1", Meta: map[string]string{}})
require.NoError(t, err)
lifecycleCfg := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{{
Status: "Enabled",
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{DaysAfterInitiation: ptrInt(1)},
ID: "for obj1",
}}}
ec.jobs <- Job{
ContainerID: bktInfo.CID,
PrivateKey: ec.key,
LifecycleConfiguration: lifecycleCfg,
Epoch: 1000,
}
ec.close()
multiparts, err := ec.tree.GetMultipartUploadsByPrefix(ec.ctx, bktInfo, "")
require.NoError(t, err)
require.Len(t, multiparts, 0)
}
func TestExecutorExpireObjectsInDays(t *testing.T) {
ec := newExecutorContext(t)
defer ec.cancel()
bktInfo, err := ec.createBktInfo(data.VersioningUnversioned)
require.NoError(t, err)
// set epoch duration to 1 hour (3600 blocks at 1000 ms per block)
ec.ffs.setEpoch(1)
ec.ffs.setEpochDuration(3600)
ec.ffs.setMsPerBlock(1000)
addresses := make([]oid.Address, 6)
for i := range addresses {
addresses[i], err = ec.addObject(bktInfo, "obj"+strconv.Itoa(i), 0, nil)
require.NoError(t, err)
}
lifecycleCfg := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Days: ptrInt(1), Date: time.Now().Add(30 * time.Hour).Format(time.RFC3339)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj0"},
ID: "obj0 expired",
},
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Days: ptrInt(1), Date: time.Now().Add(18 * time.Hour).Format(time.RFC3339)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj1"},
ID: "obj1 expired",
},
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Days: ptrInt(1), Date: time.Now().Add(42 * time.Hour).Format(time.RFC3339)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj2"},
ID: "obj2 expired",
},
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Days: ptrInt(2), Date: time.Now().Add(30 * time.Hour).Format(time.RFC3339)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj3"},
ID: "obj3 expired",
},
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Days: ptrInt(2), Date: time.Now().Add(52 * time.Hour).Format(time.RFC3339)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj4"},
ID: "obj4 not expired",
},
{
Status: "Enabled",
Expiration: &data.LifecycleExpiration{Days: ptrInt(2), Date: time.Now().Add(42 * time.Hour).Format(time.RFC3339)},
Filter: &data.LifecycleRuleFilter{Prefix: "obj5"},
ID: "obj5 not expired",
},
}}
currentEpoch := uint64(36)
ec.ffs.setEpoch(currentEpoch)
nowTime = func() time.Time {
return time.Now().Add(time.Duration(currentEpoch) * time.Hour)
}
ec.jobs <- Job{
ContainerID: bktInfo.CID,
PrivateKey: ec.key,
LifecycleConfiguration: lifecycleCfg,
Epoch: currentEpoch,
}
ec.close()
for i, addr := range addresses {
_, err = ec.ffs.GetObject(ec.ctx, addr)
if i >= 4 {
require.NoError(t, err, "expected no error, got: %v (obj %d)", err, i)
} else {
require.Truef(t, client.IsErrObjectNotFound(err), "expected not found error, got: %v (obj %d)", err, i)
}
}
}
func ptrUint64(val uint64) *uint64 {
return &val
}
func ptrInt(val int) *int {
return &val
}

View file

@ -1,415 +0,0 @@
package lifecycle
import (
"context"
"crypto/ecdsa"
"encoding/binary"
"encoding/xml"
"fmt"
"io"
"slices"
"sort"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/hrw"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
"golang.org/x/text/encoding/ianaindex"
)
type Job struct {
ContainerID cid.ID
PrivateKey *keys.PrivateKey
LifecycleConfiguration *data.LifecycleConfiguration
Epoch uint64
Bearer *bearer.Token
}
type JobProvider struct {
userFetcher UserFetcher
containerFetcher ContainerFetcher
treeFetcher TreeFetcher
frostfsFetcher FrostFSFetcher
credentialSource CredentialSource
settings Settings
currentLifecycler *keys.PrivateKey
log *zap.Logger
cancelCurrentFetch context.CancelFunc
jobChan chan Job
epochChan <-chan uint64
}
type Settings interface {
ServicesKeys() keys.PublicKeys
}
type Config struct {
UserFetcher UserFetcher
ContainerFetcher ContainerFetcher
FrostFSFetcher FrostFSFetcher
CredentialSource CredentialSource
TreeFetcher TreeFetcher
Settings Settings
CurrentLifecycler *keys.PrivateKey
Logger *zap.Logger
BufferSize int
EpochChannel <-chan uint64
}
func NewJobProvider(ctx context.Context, cfg Config) *JobProvider {
provider := &JobProvider{
userFetcher: cfg.UserFetcher,
settings: cfg.Settings,
log: cfg.Logger,
containerFetcher: cfg.ContainerFetcher,
treeFetcher: cfg.TreeFetcher,
frostfsFetcher: cfg.FrostFSFetcher,
credentialSource: cfg.CredentialSource,
currentLifecycler: cfg.CurrentLifecycler,
epochChan: cfg.EpochChannel,
jobChan: make(chan Job, cfg.BufferSize),
cancelCurrentFetch: func() {},
}
go provider.startFetchRoutine(ctx)
return provider
}
type objToHRW struct {
epoch uint64
hash util.Uint160
}
func (o objToHRW) bytes() []byte {
buf := make([]byte, binary.MaxVarintLen64)
ln := binary.PutUvarint(buf, o.epoch)
return append(o.hash[:], buf[:ln]...)
}
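// A minimal sketch (hypothetical userHash and currentPosition) of how this key assigns
// a user to a lifecycler for a given epoch, mirroring handleEpoch below:
//
//	obj := objToHRW{epoch: 7, hash: userHash}
//	indexes := []uint64{0, 1, 2} // one index per sorted lifecycler key
//	processedHere := hrw.Sort(indexes, hrw.Hash(obj.bytes()))[0] == currentPosition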
type UserContainer struct {
ID user.ID
Key *keys.PrivateKey
Container cid.ID
APEChain ape.Chain
}
func (p *JobProvider) Jobs() <-chan Job {
return p.jobChan
}
func (p *JobProvider) startFetchRoutine(ctx context.Context) {
var (
epochCtx context.Context
wg sync.WaitGroup
)
defer func() {
wg.Wait()
close(p.jobChan)
}()
for {
select {
case <-ctx.Done():
p.log.Info(logs.JobProviderStopped, zap.Error(ctx.Err()))
p.cancelCurrentFetch()
return
case epoch, ok := <-p.epochChan:
if !ok {
p.log.Info(logs.JobProviderStoppedBecauseOfEpochChan)
return
}
p.log.Info(logs.FetcherTriggerEpoch, zap.Uint64("epoch", epoch))
p.cancelCurrentFetch()
wg.Wait()
p.cleanJobChannel()
epochCtx, p.cancelCurrentFetch = context.WithCancel(ctx)
wg.Add(1)
go p.handleEpoch(epochCtx, epoch, &wg)
}
}
}
func (p *JobProvider) cleanJobChannel() {
for len(p.jobChan) != 0 {
select {
case <-p.jobChan:
default:
}
}
}
func (p *JobProvider) handleEpoch(ctx context.Context, epoch uint64, wg *sync.WaitGroup) {
defer wg.Done()
userHashes, err := p.userFetcher.Users()
if err != nil {
p.log.Error(logs.FailedToFetchUsers, zap.Error(err))
return
}
lifecyclers, currentPosition := p.svcKeys()
indexes := make([]uint64, len(lifecyclers))
for i := range indexes {
indexes[i] = uint64(i)
}
obj := objToHRW{epoch: epoch}
for i := range userHashes {
obj.hash = userHashes[i]
h := hrw.Hash(obj.bytes())
if hrw.Sort(indexes, h)[0] != currentPosition {
continue
}
select {
case <-ctx.Done():
return
default:
if err = p.handleUser(ctx, userHashes[i], epoch); err != nil {
p.log.Warn(logs.FailedToHandleUser,
zap.String("address", address.Uint160ToString(userHashes[i])),
zap.Error(err))
}
}
}
}
func (p *JobProvider) handleUser(ctx context.Context, userHash util.Uint160, epoch uint64) error {
userKey, err := p.resolveUserKey(ctx, userHash)
if err != nil {
return fmt.Errorf("resolve key: %w", err)
}
var userID user.ID
user.IDFromKey(&userID, (ecdsa.PublicKey)(*userKey.PublicKey()))
containers, err := p.containerFetcher.Containers(userID)
if err != nil {
return fmt.Errorf("list user containers: %w", err)
}
p.log.Info(logs.FoundUserContainers,
zap.String("user", userID.EncodeToString()),
zap.Int("containers", len(containers)))
successfullyFetchedContainers := len(containers)
allowedChainRaw := p.formAllowedAPEChain().Bytes()
for _, container := range containers {
uc := &UserContainer{
ID: userID,
Key: userKey,
Container: container,
APEChain: ape.Chain{Raw: allowedChainRaw},
}
select {
case <-ctx.Done():
return ctx.Err()
default:
if err = p.handleContainer(ctx, uc, epoch); err != nil {
p.log.Warn(logs.FailedToHandleContainer,
zap.String("user", userID.EncodeToString()),
zap.String("cid", container.EncodeToString()),
zap.Error(err))
successfullyFetchedContainers--
}
}
}
p.log.Info(logs.FetchedUserContainers,
zap.String("user", userID.EncodeToString()),
zap.Int("successful", successfullyFetchedContainers),
zap.Int("all", len(containers)))
return nil
}
func (p *JobProvider) handleContainer(ctx context.Context, uc *UserContainer, epoch uint64) error {
var lifecyclerOwner user.ID
user.IDFromKey(&lifecyclerOwner, p.currentLifecycler.PrivateKey.PublicKey) // consider pre-compute this
bktInfo := &data.BucketInfo{
CID: uc.Container,
Owner: uc.ID,
}
apeOverride := formAPEOverride(uc)
btoken, err := formBearerToken(epoch, apeOverride, uc.Key, lifecyclerOwner)
if err != nil {
return fmt.Errorf("form bearer token: %w", err)
}
bCtx := addBearerToContext(ctx, btoken)
addr, err := p.treeFetcher.GetBucketLifecycleConfiguration(bCtx, bktInfo)
if err != nil {
return fmt.Errorf("get lifecycle configuration from tree: %w", err)
}
if uc.Container.Equals(addr.Container()) {
ctx = bCtx
}
configuration, err := p.fetchLifecycleConfiguration(ctx, addr)
if err != nil {
return fmt.Errorf("get lifecycle configuration from storage: %w", err)
}
job := Job{
ContainerID: uc.Container,
PrivateKey: uc.Key,
LifecycleConfiguration: configuration,
Epoch: epoch,
Bearer: btoken,
}
select {
case <-ctx.Done():
return ctx.Err()
case p.jobChan <- job:
}
return nil
}
func (p *JobProvider) resolveUserKey(ctx context.Context, userHash util.Uint160) (*keys.PrivateKey, error) {
userKey, err := p.userFetcher.UserKey(userHash)
if err != nil {
return nil, fmt.Errorf("get public key: %w", err)
}
privateKey, err := p.credentialSource.Credentials(ctx, userKey)
if err != nil {
return nil, fmt.Errorf("get private key: %w", err)
}
return privateKey, nil
}
func (p *JobProvider) svcKeys() (keys.PublicKeys, uint64) {
currentPublicKey := p.currentLifecycler.PublicKey()
lifecyclerKeys := p.settings.ServicesKeys()
if position := slices.IndexFunc(lifecyclerKeys, func(pk *keys.PublicKey) bool {
return pk.Equal(currentPublicKey)
}); position == -1 {
lifecyclerKeys = append(lifecyclerKeys, currentPublicKey)
}
sort.Slice(lifecyclerKeys, func(i, j int) bool {
return lifecyclerKeys[i].Cmp(lifecyclerKeys[j]) == -1
})
position := slices.IndexFunc(lifecyclerKeys, func(pk *keys.PublicKey) bool {
return pk.Equal(currentPublicKey)
})
if position == -1 {
// should never happen
panic("current lifecycler key isn't in list")
}
return lifecyclerKeys, uint64(position)
}
func (p *JobProvider) fetchLifecycleConfiguration(ctx context.Context, addr oid.Address) (*data.LifecycleConfiguration, error) {
res, err := p.frostfsFetcher.GetObject(ctx, addr)
if err != nil {
return nil, err
}
defer func() {
if closeErr := res.Payload.Close(); closeErr != nil {
p.log.Warn("could not close object payload", zap.String("address", addr.EncodeToString()), zap.Error(closeErr))
}
}()
lifecycleCfg := &data.LifecycleConfiguration{}
dec := newDecoder(res.Payload)
if err = dec.Decode(lifecycleCfg); err != nil {
return nil, fmt.Errorf("unmarshal lifecycle configuration '%s': %w", addr.EncodeToString(), err)
}
return lifecycleCfg, nil
}
func (p *JobProvider) formAllowedAPEChain() *chain.Chain {
return &chain.Chain{
ID: chain.ID("lifecycler/" + p.currentLifecycler.Address()),
Rules: []chain.Rule{{
Status: chain.Allow,
Actions: chain.Actions{Names: []string{"*"}},
Resources: chain.Resources{Names: []string{"*"}},
}},
}
}
func formBearerToken(epoch uint64, apeOverride bearer.APEOverride, userKey *keys.PrivateKey, lifecyclerOwner user.ID) (*bearer.Token, error) {
var btoken bearer.Token
btoken.SetIat(epoch)
btoken.SetNbf(epoch)
btoken.SetExp(epoch + 2) // the token must outlive the job; the +2 margin may be excessive (maybe +1 suffices), consider making it configurable
btoken.SetAPEOverride(apeOverride)
btoken.AssertUser(lifecyclerOwner)
if err := btoken.Sign(userKey.PrivateKey); err != nil {
return nil, fmt.Errorf("sign: %w", err)
}
return &btoken, nil
}
func formAPEOverride(userInfo *UserContainer) bearer.APEOverride {
return bearer.APEOverride{
Target: ape.ChainTarget{
TargetType: ape.TargetTypeContainer,
Name: userInfo.Container.EncodeToString(),
},
Chains: []ape.Chain{userInfo.APEChain},
}
}
func addBearerToContext(ctx context.Context, btoken *bearer.Token) context.Context {
return middleware.SetBox(ctx, &middleware.Box{
AccessBox: &accessbox.Box{
Gate: &accessbox.GateData{
BearerToken: btoken,
},
},
})
}
const awsDefaultNamespace = "http://s3.amazonaws.com/doc/2006-03-01/"
func newDecoder(r io.Reader) *xml.Decoder {
dec := xml.NewDecoder(r)
dec.DefaultSpace = awsDefaultNamespace
dec.CharsetReader = func(charset string, reader io.Reader) (io.Reader, error) {
enc, err := ianaindex.IANA.Encoding(charset)
if err != nil {
return nil, fmt.Errorf("charset %s: %w", charset, err)
}
return enc.NewDecoder().Reader(reader), nil
}
return dec
}
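// A minimal usage sketch (hypothetical XML, assuming a strings import): DefaultSpace
// lets a document without an explicit xmlns decode as if its tags were in the AWS namespace.
//
//	payload := strings.NewReader(`<LifecycleConfiguration><Rule><ID>test</ID><Status>Enabled</Status></Rule></LifecycleConfiguration>`)
//	var cfg data.LifecycleConfiguration
//	if err := newDecoder(payload).Decode(&cfg); err != nil {
//		// handle malformed configuration
//	}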

View file

@ -1,517 +0,0 @@
package lifecycle
import (
"bytes"
"context"
"encoding/xml"
"errors"
"io"
"sync"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
var _ UserFetcher = (*userFetcherMock)(nil)
type userFetcherMock struct {
users map[util.Uint160]*keys.PrivateKey
}
func newUserFetcherMock(users map[util.Uint160]*keys.PrivateKey) *userFetcherMock {
if users == nil {
users = map[util.Uint160]*keys.PrivateKey{}
}
return &userFetcherMock{
users: users,
}
}
func (u *userFetcherMock) Users() ([]util.Uint160, error) {
res := make([]util.Uint160, 0, len(u.users))
for hash := range u.users {
res = append(res, hash)
}
return res, nil
}
func (u *userFetcherMock) UserKey(hash util.Uint160) (*keys.PublicKey, error) {
key, ok := u.users[hash]
if !ok {
return nil, errors.New("userFetcherMock: hash not found")
}
return key.PublicKey(), nil
}
var _ ContainerFetcher = (*containerFetcherMock)(nil)
type containerFetcherMock struct {
containers map[util.Uint160][]cid.ID
}
func newContainerFetcherMock(containers map[util.Uint160][]cid.ID) *containerFetcherMock {
if containers == nil {
containers = map[util.Uint160][]cid.ID{}
}
return &containerFetcherMock{
containers: containers,
}
}
func (c *containerFetcherMock) Containers(owner user.ID) ([]cid.ID, error) {
hash, err := owner.ScriptHash()
if err != nil {
return nil, err
}
containers, ok := c.containers[hash]
if !ok {
return nil, errors.New("containerFetcherMock: hash not found")
}
return containers, nil
}
var _ FrostFSFetcher = (*frostfsFetcherMock)(nil)
type frostfsFetcherMock struct {
mu sync.RWMutex
objects map[oid.Address]*object.Object
epoch uint64
epochDuration uint64
msPerBlock int64
lifecycleContainer cid.ID
enableBearerCheck bool
}
func newFrostFSFetcherMock() *frostfsFetcherMock {
return &frostfsFetcherMock{
objects: map[oid.Address]*object.Object{},
epoch: 1,
epochDuration: 3600,
msPerBlock: 1000,
}
}
func (c *frostfsFetcherMock) setObject(addr oid.Address, obj *object.Object) {
c.mu.Lock()
defer c.mu.Unlock()
c.objects[addr] = obj
}
func (c *frostfsFetcherMock) setEpoch(epoch uint64) {
c.mu.Lock()
defer c.mu.Unlock()
c.epoch = epoch
}
func (c *frostfsFetcherMock) setEpochDuration(blocks uint64) {
c.mu.Lock()
defer c.mu.Unlock()
c.epochDuration = blocks
}
func (c *frostfsFetcherMock) setMsPerBlock(msPerBlock int64) {
c.mu.Lock()
defer c.mu.Unlock()
c.msPerBlock = msPerBlock
}
func (c *frostfsFetcherMock) GetObject(ctx context.Context, addr oid.Address) (pool.ResGetObject, error) {
c.mu.RLock()
defer c.mu.RUnlock()
if c.enableBearerCheck {
_, err := middleware.GetBoxData(ctx)
if c.lifecycleContainer.Equals(addr.Container()) {
if err == nil {
return pool.ResGetObject{}, errors.New("box data present in context for lifecycle container")
}
} else if err != nil {
return pool.ResGetObject{}, err
}
}
val, ok := c.objects[addr]
if !ok {
return pool.ResGetObject{}, &apistatus.ObjectNotFound{}
}
return pool.ResGetObject{
Header: *val,
Payload: &payloadReader{bytes.NewReader(val.Payload())},
}, nil
}
type payloadReader struct {
io.Reader
}
func (p *payloadReader) Close() error { return nil }
func (c *frostfsFetcherMock) NetworkInfo(context.Context) (*netmap.NetworkInfo, error) {
c.mu.RLock()
defer c.mu.RUnlock()
var ni netmap.NetworkInfo
ni.SetCurrentEpoch(c.epoch)
ni.SetEpochDuration(c.epochDuration)
ni.SetMsPerBlock(c.msPerBlock)
return &ni, nil
}
func (c *frostfsFetcherMock) DeleteObject(_ context.Context, addr oid.Address) error {
c.mu.Lock()
defer c.mu.Unlock()
if _, ok := c.objects[addr]; !ok {
return &apistatus.ObjectNotFound{}
}
delete(c.objects, addr)
return nil
}
var _ CredentialSource = (*credentialSourceMock)(nil)
type credentialSourceMock struct {
users map[util.Uint160]*keys.PrivateKey
}
func newCredentialSourceMock(users map[util.Uint160]*keys.PrivateKey) *credentialSourceMock {
if users == nil {
users = map[util.Uint160]*keys.PrivateKey{}
}
return &credentialSourceMock{
users: users,
}
}
func (c *credentialSourceMock) Credentials(_ context.Context, pk *keys.PublicKey) (*keys.PrivateKey, error) {
key, ok := c.users[pk.GetScriptHash()]
if !ok {
return nil, errors.New("credentialSourceMock: hash not found")
}
return key, nil
}
var _ TreeFetcher = (*treeFetcherMock)(nil)
type treeFetcherMock struct {
configurations map[cid.ID]oid.Address
}
func newTreeFetcherMock(configs map[cid.ID]oid.Address) *treeFetcherMock {
if configs == nil {
configs = map[cid.ID]oid.Address{}
}
return &treeFetcherMock{
configurations: configs,
}
}
func (t *treeFetcherMock) GetBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) (oid.Address, error) {
val, ok := t.configurations[bktInfo.CID]
if !ok {
return oid.Address{}, errors.New("treeFetcherMock: hash not found")
}
return val, nil
}
var _ Settings = (*settingsMock)(nil)
type settingsMock struct{}
func (s *settingsMock) ServicesKeys() keys.PublicKeys {
return nil
}
func TestFetcherBase(t *testing.T) {
ctx := context.Background()
log := zaptest.NewLogger(t)
key, err := keys.NewPrivateKey()
require.NoError(t, err)
mocks, err := initFetcherMocks(2, 1)
require.NoError(t, err)
epochCh := make(chan uint64)
go func() {
epochCh <- 1
close(epochCh)
}()
cfg := Config{
UserFetcher: mocks.userFetcher,
ContainerFetcher: mocks.containerFetcher,
FrostFSFetcher: mocks.configurationFetcher,
CredentialSource: mocks.credentialSource,
TreeFetcher: mocks.treeFetcher,
Settings: &settingsMock{},
CurrentLifecycler: key,
Logger: log,
EpochChannel: epochCh,
}
f := NewJobProvider(ctx, cfg)
var res []Job
for job := range f.Jobs() {
res = append(res, job)
}
require.Len(t, res, 2)
}
func TestFetcherBearer(t *testing.T) {
ctx := context.Background()
log := zaptest.NewLogger(t)
key, err := keys.NewPrivateKey()
require.NoError(t, err)
mocks, err := initFetcherMocks(1, 2)
require.NoError(t, err)
epochCh := make(chan uint64)
go func() {
epochCh <- 1
close(epochCh)
}()
users, err := mocks.userFetcher.Users()
require.NoError(t, err)
require.Len(t, users, 1)
cids := mocks.containerFetcher.containers[users[0]]
require.Len(t, cids, 2)
// emulate lifecycle container for one bucket
addr := mocks.treeFetcher.configurations[cids[0]]
obj := mocks.configurationFetcher.objects[addr]
addr.SetContainer(cidtest.ID())
mocks.treeFetcher.configurations[cids[0]] = addr
obj.SetContainerID(addr.Container())
mocks.configurationFetcher.objects[addr] = obj
mocks.configurationFetcher.lifecycleContainer = addr.Container()
mocks.configurationFetcher.enableBearerCheck = true
cfg := Config{
UserFetcher: mocks.userFetcher,
ContainerFetcher: mocks.containerFetcher,
FrostFSFetcher: mocks.configurationFetcher,
CredentialSource: mocks.credentialSource,
TreeFetcher: mocks.treeFetcher,
Settings: &settingsMock{},
CurrentLifecycler: key,
Logger: log,
EpochChannel: epochCh,
}
f := NewJobProvider(ctx, cfg)
var res []Job
for job := range f.Jobs() {
res = append(res, job)
}
require.Len(t, res, 2)
}
func TestFetcherCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
log := zaptest.NewLogger(t)
key, err := keys.NewPrivateKey()
require.NoError(t, err)
mocks, err := initFetcherMocks(1, 1)
require.NoError(t, err)
epochCh := make(chan uint64)
go func() {
epochCh <- 1
epochCh <- 2
close(epochCh)
}()
cfg := Config{
UserFetcher: mocks.userFetcher,
ContainerFetcher: mocks.containerFetcher,
FrostFSFetcher: mocks.configurationFetcher,
CredentialSource: mocks.credentialSource,
TreeFetcher: mocks.treeFetcher,
Settings: &settingsMock{},
CurrentLifecycler: key,
Logger: log,
EpochChannel: epochCh,
}
f := NewJobProvider(ctx, cfg)
var res []Job
for job := range f.Jobs() {
res = append(res, job)
}
require.Len(t, res, 1)
}
func TestFetcherCleanBuffer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
log := zaptest.NewLogger(t)
key, err := keys.NewPrivateKey()
require.NoError(t, err)
mocks, err := initFetcherMocks(1, 10)
require.NoError(t, err)
epochCh := make(chan uint64)
f := newJobProvider(ctx, mocks, epochCh, key, log, 11)
epochCh <- 1
for len(f.Jobs()) != 10 { // wait for the jobs buffer to be filled by the first epoch's work
time.Sleep(100 * time.Millisecond)
}
mocks, err = initFetcherMocks(1, 11)
require.NoError(t, err)
updateFetcherMocks(f, mocks)
epochCh <- 2
close(epochCh)
for len(f.Jobs()) != 11 { // wait for the jobs buffer to be filled by the second epoch's work
time.Sleep(100 * time.Millisecond)
}
for job := range f.Jobs() {
require.Equal(t, uint64(2), job.Epoch, "not all old epoch jobs were cleaned from buffer")
}
}
func newJobProvider(ctx context.Context, mocks *fetchersMock, epochCh <-chan uint64, key *keys.PrivateKey, log *zap.Logger, bufferSize int) *JobProvider {
cfg := Config{
UserFetcher: mocks.userFetcher,
ContainerFetcher: mocks.containerFetcher,
FrostFSFetcher: mocks.configurationFetcher,
CredentialSource: mocks.credentialSource,
TreeFetcher: mocks.treeFetcher,
Settings: &settingsMock{},
CurrentLifecycler: key,
Logger: log,
EpochChannel: epochCh,
BufferSize: bufferSize,
}
return NewJobProvider(ctx, cfg)
}
func updateFetcherMocks(f *JobProvider, mocks *fetchersMock) {
f.userFetcher = mocks.userFetcher
f.containerFetcher = mocks.containerFetcher
f.frostfsFetcher = mocks.configurationFetcher
f.credentialSource = mocks.credentialSource
f.treeFetcher = mocks.treeFetcher
}
type fetchersMock struct {
userFetcher *userFetcherMock
containerFetcher *containerFetcherMock
configurationFetcher *frostfsFetcherMock
credentialSource *credentialSourceMock
treeFetcher *treeFetcherMock
}
func initFetcherMocks(users, containers int) (*fetchersMock, error) {
usersMap, err := generateUsersMap(users)
if err != nil {
return nil, err
}
ffsFetcher := newFrostFSFetcherMock()
cnrsMap := make(map[util.Uint160][]cid.ID)
treeMap := make(map[cid.ID]oid.Address)
for hash := range usersMap {
for i := 0; i < containers; i++ {
addr := oidtest.Address()
cnrsMap[hash] = append(cnrsMap[hash], addr.Container())
treeMap[addr.Container()] = addr
lc := &data.LifecycleConfiguration{Rules: []data.LifecycleRule{{ID: addr.EncodeToString()}}}
raw, err := xml.Marshal(lc)
if err != nil {
return nil, err
}
obj := object.New()
obj.SetPayload(raw)
obj.SetContainerID(addr.Container())
obj.SetID(addr.Object())
ffsFetcher.objects[addr] = obj
}
}
return &fetchersMock{
userFetcher: newUserFetcherMock(usersMap),
containerFetcher: newContainerFetcherMock(cnrsMap),
configurationFetcher: ffsFetcher,
credentialSource: newCredentialSourceMock(usersMap),
treeFetcher: newTreeFetcherMock(treeMap),
}, nil
}
func generateKeys(n int) ([]*keys.PrivateKey, error) {
var err error
res := make([]*keys.PrivateKey, n)
for i := 0; i < n; i++ {
if res[i], err = keys.NewPrivateKey(); err != nil {
return nil, err
}
}
return res, nil
}
func generateUsersMap(n int) (map[util.Uint160]*keys.PrivateKey, error) {
res := make(map[util.Uint160]*keys.PrivateKey, n)
userKeys, err := generateKeys(n)
if err != nil {
return nil, err
}
for _, key := range userKeys {
res[key.GetScriptHash()] = key
}
return res, nil
}

View file

@ -1,77 +0,0 @@
package logs
const (
ApplicationStarted = "application started"
ApplicationStopped = "application stopped"
StoppingApplication = "stopping application"
ServiceIsRunning = "service is running"
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
ShuttingDownService = "shutting down service"
CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
CantShutDownService = "can't shut down service"
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missing"
FailedToReloadConfig = "failed to reload config"
LogLevelWontBeUpdated = "log level won't be updated"
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
ListenerStopped = "listener stopped"
MorphClientStopped = "morph client stopped"
MorphClientReconnection = "morph client reconnection..."
ListenerReconnection = "listener reconnection..."
MorphClientCouldntBeReconnected = "morph client couldn't be reconnected"
ListenerCouldntBeReconnected = "listener couldn't be reconnected"
ResolveNetmapContract = "failed to resolve netmap contract"
ResolveFrostfsIDContract = "failed to resolve frostfsid contract"
ResolveContainerContract = "failed to resolve container contract"
NewEpochWasTriggered = "new epoch was triggered"
InitNotificator = "init notificator"
NoMorphRPCEndpoints = "no morph RPC endpoints"
FailedToLoadPrivateKey = "failed to load private key"
NoCredentialSourceWallets = "no credential source wallets"
CouldntCreateWalletSource = "could not create wallet source"
AddedStoragePeer = "added storage peer"
FailedToCreateConnectionPool = "failed to create connection pool"
FailedToDialConnectionPool = "failed to dial connection pool"
FailedToCreateTreePool = "failed to create tree pool"
FailedToDialTreePool = "failed to dial tree pool"
FoundUserContainers = "found user containers"
JobProviderStopped = "job provider stopped"
JobProviderStoppedBecauseOfEpochChan = "job provider stopped because the epoch channel is closed"
FailedToInitMorphClient = "failed to init morph client"
FailedToFetchServicesKeys = "failed to fetch lifecycle services keys"
FailedToFetchUsers = "failed to fetch users"
FailedToHandleUser = "failed to handle user"
FailedToHandleContainer = "failed to handle container"
FetcherTriggerEpoch = "fetcher: trigger epoch, cancel previous fetch"
FetchedUserContainers = "fetched user container configurations"
HandlerTriggered = "handler: triggered"
HandlerContextCanceled = "handler: context canceled"
FailedToSubmitTaskToPool = "failed to submit task to executor pool"
WorkerFailedToHandleJob = "worker failed to handle job"
ExecutorStopped = "executor stopped"
ExecutorStoppedJobsChannelIsClosed = "executor stopped: jobs channel is closed"
SkipRule = "skip rule"
ObjectMatchingFailed = "object matching failed"
AbortMultipartUploads = "abort multipart uploads"
ExpireObjects = "expire objects"
FailedToGetMultipartCreationEpoch = "failed to get multipart creation epoch"
FailedToAbortMultipart = "failed to abort multipart"
FailedToExpireObject = "failed to expire object"
DeleteObjectVersionFromStorage = "delete object version from storage"
FailedToGenerateRandomIDForDeleteMarker = "failed to generate random id for delete marker"
AddDeleteMarker = "add delete marker"
DeleteObjectVersionFromTree = "delete object version from tree"
EpochMismatched = "epoch mismatched"
UnknownCredentialSource = "unknown credential source to use"
AbortMultipart = "abort multipart"
ProcessingJob = "processing job"
)
const (
TagExpirationDeleteObject = "tag:expiration_delete_object"
TagExpirationProcessVersion = "tag:expiration_process_version"
TagExpirationRemoveVersion = "tag:expiration_remove_version"
TagMultipartDeleteObject = "tag:multipart_delete_object"
TagMultipartProcessUpload = "tag:multipart_process_upload"
)

View file

@ -1,117 +0,0 @@
package metrics
import (
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
var appMetricsDesc = map[string]map[string]Description{
stateSubsystem: {
healthMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: stateSubsystem,
Name: healthMetric,
Help: "FrostFS S3 Lifecycler state",
},
versionInfoMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: stateSubsystem,
Name: versionInfoMetric,
Help: "Version of current FrostFS S3 Lifecycler instance",
VariableLabels: []string{"version"},
},
},
statisticSubsystem: {
droppedLogs: Description{
Type: dto.MetricType_COUNTER,
Namespace: namespace,
Subsystem: statisticSubsystem,
Name: droppedLogs,
Help: "Dropped logs (by sampling) count",
},
},
}
type Description struct {
Type dto.MetricType
Namespace string
Subsystem string
Name string
Help string
ConstantLabels prometheus.Labels
VariableLabels []string
}
func (d *Description) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Type string `json:"type"`
FQName string `json:"name"`
Help string `json:"help"`
ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"`
VariableLabels []string `json:"variable_labels,omitempty"`
}{
Type: d.Type.String(),
FQName: d.BuildFQName(),
Help: d.Help,
ConstantLabels: d.ConstantLabels,
VariableLabels: d.VariableLabels,
})
}
func (d *Description) BuildFQName() string {
return prometheus.BuildFQName(d.Namespace, d.Subsystem, d.Name)
}
// DescribeAll returns descriptions for metrics.
func DescribeAll() []Description {
var list []Description
for _, m := range appMetricsDesc {
for _, description := range m {
list = append(list, description)
}
}
return list
}
func newOpts(description Description) prometheus.Opts {
return prometheus.Opts{
Namespace: description.Namespace,
Subsystem: description.Subsystem,
Name: description.Name,
Help: description.Help,
ConstLabels: description.ConstantLabels,
}
}
func mustNewGauge(description Description) prometheus.Gauge {
if description.Type != dto.MetricType_GAUGE {
panic("invalid metric type")
}
return prometheus.NewGauge(
prometheus.GaugeOpts(newOpts(description)),
)
}
func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
if description.Type != dto.MetricType_GAUGE {
panic("invalid metric type")
}
return prometheus.NewGaugeVec(
prometheus.GaugeOpts(newOpts(description)),
description.VariableLabels,
)
}
func mustNewCounter(description Description) prometheus.Counter {
if description.Type != dto.MetricType_COUNTER {
panic("invalid metric type")
}
return prometheus.NewCounter(
prometheus.CounterOpts(newOpts(description)),
)
}
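
BuildFQName delegates to prometheus.BuildFQName, which joins namespace, subsystem and name with underscores. A quick runnable check of what the state/health description above resolves to:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Matches Description{Namespace: "frostfs_s3_lifecycler", Subsystem: "state", Name: "health"}.
	fmt.Println(prometheus.BuildFQName("frostfs_s3_lifecycler", "state", "health"))
	// Output: frostfs_s3_lifecycler_state_health
}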

View file

@ -1,27 +0,0 @@
//go:build dump_metrics
package metrics
import (
"encoding/json"
"flag"
"os"
"testing"
"github.com/stretchr/testify/require"
)
var metricsPath = flag.String("out", "", "File to export FrostFS S3 Lifecycler metrics to.")
func TestDescribeAll(t *testing.T) {
flag.Parse()
require.NotEmpty(t, metricsPath, "flag 'out' must be provided to dump metrics description")
desc := DescribeAll()
data, err := json.Marshal(desc)
require.NoError(t, err)
err = os.WriteFile(*metricsPath, data, 0644)
require.NoError(t, err)
}
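
Because the file is guarded by the dump_metrics build tag and -out is a flag of the test binary rather than of go test, both have to be passed explicitly, the latter after -args. A plausible invocation (the package path is an assumption):

go test -tags dump_metrics -run TestDescribeAll ./metrics -args -out=metrics-dump.json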

View file

@ -1,104 +0,0 @@
package metrics
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
)
type (
// AppMetrics is a metrics container for all app-specific data.
AppMetrics struct {
stateMetrics
statisticMetrics
}
// stateMetrics are metrics of application state.
stateMetrics struct {
healthCheck prometheus.Gauge
versionInfo *prometheus.GaugeVec
}
statisticMetrics struct {
droppedLogs prometheus.Counter
}
)
const (
namespace = "frostfs_s3_lifecycler"
stateSubsystem = "state"
healthMetric = "health"
versionInfoMetric = "version_info"
statisticSubsystem = "statistic"
droppedLogs = "dropped_logs"
)
func (m stateMetrics) register() {
prometheus.MustRegister(m.healthCheck)
prometheus.MustRegister(m.versionInfo)
}
func (m stateMetrics) SetHealth(s int32) {
m.healthCheck.Set(float64(s))
}
func (m stateMetrics) SetVersion(ver string) {
m.versionInfo.WithLabelValues(ver).Set(1)
}
func (m statisticMetrics) register() {
prometheus.MustRegister(m.droppedLogs)
}
func (m statisticMetrics) DroppedLogsInc() {
m.droppedLogs.Inc()
}
// NewAppMetrics creates an instance of application metrics and registers them.
func NewAppMetrics() *AppMetrics {
stateMetric := newStateMetrics()
stateMetric.register()
statisticMetric := newStatisticMetrics()
statisticMetric.register()
return &AppMetrics{
stateMetrics: *stateMetric,
statisticMetrics: *statisticMetric,
}
}
func newStateMetrics() *stateMetrics {
return &stateMetrics{
healthCheck: mustNewGauge(appMetricsDesc[stateSubsystem][healthMetric]),
versionInfo: mustNewGaugeVec(appMetricsDesc[stateSubsystem][versionInfoMetric]),
}
}
func newStatisticMetrics() *statisticMetrics {
return &statisticMetrics{
droppedLogs: mustNewCounter(appMetricsDesc[statisticSubsystem][droppedLogs]),
}
}
// NewPrometheusService creates a new service for gathering prometheus metrics.
func NewPrometheusService(log *zap.Logger, cfg Config) *Service {
if log == nil {
return nil
}
return &Service{
Server: &http.Server{
Addr: cfg.Address,
Handler: promhttp.Handler(),
},
enabled: cfg.Enabled,
serviceType: "Prometheus",
log: log.With(zap.String("service", "Prometheus")),
}
}
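
Taken together, a minimal usage sketch of the metrics container; the import path is assumed, and as an internal package it is importable only from within this module:

package main

import "git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/metrics" // assumed import path

func main() {
	m := metrics.NewAppMetrics() // registers the state and statistic collectors
	m.SetVersion("dev")          // frostfs_s3_lifecycler_state_version_info{version="dev"} = 1
	m.SetHealth(1)               // the concrete status codes are defined by the application
	m.DroppedLogsInc()           // intended for the log sampler when entries are dropped
}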

View file

@ -1,33 +0,0 @@
package metrics
import (
"net/http"
"net/http/pprof"
"go.uber.org/zap"
)
// NewPprofService creates a new service for gathering pprof metrics.
func NewPprofService(l *zap.Logger, cfg Config) *Service {
handler := http.NewServeMux()
handler.HandleFunc("/debug/pprof/", pprof.Index)
handler.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
handler.HandleFunc("/debug/pprof/profile", pprof.Profile)
handler.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
handler.HandleFunc("/debug/pprof/trace", pprof.Trace)
// Manually add support for paths linked to by index page at /debug/pprof/
for _, item := range []string{"allocs", "block", "heap", "goroutine", "mutex", "threadcreate"} {
handler.Handle("/debug/pprof/"+item, pprof.Handler(item))
}
return &Service{
Server: &http.Server{
Addr: cfg.Address,
Handler: handler,
},
enabled: cfg.Enabled,
serviceType: "Pprof",
log: l.With(zap.String("service", "Pprof")),
}
}

View file

@ -1,49 +0,0 @@
package metrics
import (
"context"
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
"go.uber.org/zap"
)
// Service serves metrics.
type Service struct {
*http.Server
enabled bool
log *zap.Logger
serviceType string
}
// Config contains parameters to configure the service.
type Config struct {
Address string
Enabled bool
}
// Start runs the HTTP service with the exposed endpoint on the configured port.
func (ms *Service) Start() {
if ms.enabled {
// nolint: truecloudlab-linters
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
err := ms.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
}
} else {
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
}
}
// ShutDown stops the service.
func (ms *Service) ShutDown(ctx context.Context) {
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
err := ms.Shutdown(ctx)
if err != nil {
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
if err = ms.Close(); err != nil {
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
}
}
}
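
A minimal lifecycle sketch, assuming the pprof service built above: Start blocks in ListenAndServe, so it runs in its own goroutine, and ShutDown gets a bounded context; the address is an assumption and comes from the application config in practice:

package sketch

import (
	"context"
	"time"

	"go.uber.org/zap"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/metrics" // assumed import path
)

// runPprof starts the pprof server and returns a function that stops it
// gracefully with a five-second deadline.
func runPprof(log *zap.Logger) (stop func()) {
	svc := metrics.NewPprofService(log, metrics.Config{
		Address: "localhost:6060", // assumed address
		Enabled: true,
	})
	go svc.Start() // blocks in ListenAndServe until shut down

	return func() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		svc.ShutDown(ctx) // logs and falls back to Close if the graceful stop fails
	}
}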

View file

@ -1,119 +0,0 @@
package morph
import (
"context"
"fmt"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
type Client struct {
mu sync.RWMutex
client *client.Client
clientOptions []client.Option
log *zap.Logger
key *keys.PrivateKey
connLost chan struct{}
reconnectInterval time.Duration
reconnection chan struct{}
}
type Config struct {
Logger *zap.Logger
Endpoints []client.Endpoint
Key *keys.PrivateKey
ReconnectInterval time.Duration
DialTimeout time.Duration
}
func New(ctx context.Context, cfg Config) (*Client, error) {
c := &Client{
log: cfg.Logger,
key: cfg.Key,
connLost: make(chan struct{}),
reconnectInterval: cfg.ReconnectInterval,
reconnection: make(chan struct{}),
}
c.clientOptions = []client.Option{
client.WithLogger(&logger.Logger{Logger: cfg.Logger}),
client.WithEndpoints(cfg.Endpoints...),
client.WithConnLostCallback(func() { c.connLost <- struct{}{} }),
client.WithDialTimeout(cfg.DialTimeout),
}
if err := c.initNewClient(ctx); err != nil {
return nil, err
}
go c.reconnectRoutine(ctx)
return c, nil
}
func (c *Client) reconnectRoutine(ctx context.Context) {
ticker := time.NewTicker(c.reconnectInterval)
defer func() {
ticker.Stop()
close(c.connLost)
close(c.reconnection)
}()
for {
select {
case <-ctx.Done():
c.log.Info(logs.MorphClientStopped, zap.Error(ctx.Err()))
return
case <-c.connLost:
c.Client().Close()
LOOP:
for {
select {
case <-ctx.Done():
c.log.Info(logs.MorphClientStopped, zap.Error(ctx.Err()))
return
case <-ticker.C:
c.log.Info(logs.MorphClientReconnection)
if err := c.initNewClient(ctx); err != nil {
c.log.Error(logs.MorphClientCouldntBeReconnected, zap.Error(err))
ticker.Reset(c.reconnectInterval)
continue
}
c.reconnection <- struct{}{}
break LOOP
}
}
}
}
}
func (c *Client) initNewClient(ctx context.Context) error {
cli, err := client.New(ctx, c.key, c.clientOptions...)
if err != nil {
return fmt.Errorf("create new client: %w", err)
}
c.mu.Lock()
c.client = cli
c.mu.Unlock()
return nil
}
func (c *Client) Client() *client.Client {
c.mu.RLock()
defer c.mu.RUnlock()
return c.client
}
func (c *Client) ReconnectionChannel() <-chan struct{} {
return c.reconnection
}
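
A construction sketch for the reconnecting client, assuming a sidechain WebSocket endpoint; the Endpoint fields and the key source are assumptions, in practice both come from the application config and wallet:

package main

import (
	"context"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph" // assumed import path
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewProduction()

	key, err := keys.NewPrivateKey() // stand-in; the real key is loaded from the configured wallet
	if err != nil {
		log.Fatal("generate key", zap.Error(err))
	}

	cli, err := morph.New(context.Background(), morph.Config{
		Logger:            log,
		Endpoints:         []client.Endpoint{{Address: "ws://morph-chain:30333/ws", Priority: 1}}, // assumed endpoint
		Key:               key,
		ReconnectInterval: 30 * time.Second,
		DialTimeout:       5 * time.Second,
	})
	if err != nil {
		log.Fatal("init morph client", zap.Error(err))
	}

	_ = cli.Client() // always fetch the current client; it is swapped under a mutex on reconnect
}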

View file

@ -1,70 +0,0 @@
package contract
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-contract/commonclient"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"go.uber.org/zap"
)
type Container struct {
client *morph.Client
contractHash util.Uint160
log *zap.Logger
}
type ContainerConfig struct {
Client *morph.Client
ContractHash util.Uint160
Log *zap.Logger
}
const (
batchSize = 100
containersOfMethod = "containersOf"
)
func NewContainer(cfg ContainerConfig) *Container {
return &Container{
client: cfg.Client,
contractHash: cfg.ContractHash,
log: cfg.Log,
}
}
func (c *Container) Containers(ownerID user.ID) ([]cid.ID, error) {
items, err := commonclient.ReadIteratorItems(c.client.Client().GetActor(), batchSize, c.contractHash, containersOfMethod, ownerID.WalletBytes())
if err != nil {
return nil, fmt.Errorf("read iterator items (%s): %w", containersOfMethod, err)
}
cidList, err := decodeCID(items)
if err != nil {
return nil, err
}
return cidList, nil
}
func decodeCID(items []stackitem.Item) ([]cid.ID, error) {
cidList := make([]cid.ID, len(items))
for i, item := range items {
rawID, err := client.BytesFromStackItem(item)
if err != nil {
return nil, fmt.Errorf("could not get byte array from stack item: %w", err)
}
if err = cidList[i].Decode(rawID); err != nil {
return nil, fmt.Errorf("decode container id: %w", err)
}
}
return cidList, nil
}
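
A usage sketch, assuming the package lives at internal/contract; it lists all containers owned by a user through a single containersOf iterator read in batches of 100:

package sketch

import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/contract" // assumed import path
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"go.uber.org/zap"
)

// listUserContainers wraps the container contract and decodes the iterator
// results into container IDs.
func listUserContainers(cli *morph.Client, containerHash util.Uint160, log *zap.Logger, owner user.ID) ([]cid.ID, error) {
	cnr := contract.NewContainer(contract.ContainerConfig{
		Client:       cli,
		ContractHash: containerHash,
		Log:          log,
	})
	return cnr.Containers(owner)
}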

View file

@ -1,101 +0,0 @@
package contract
import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
morphclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
type FrostFSID struct {
morphClient *morph.Client
contractHash util.Uint160
}
type FrostFSIDConfig struct {
// Client is a multi-endpoint neo-go client with automatic reconnect.
Client *morph.Client
// ContractHash is the hash of the FrostfsID contract.
ContractHash util.Uint160
}
// NewFrostFSID creates a new FrostfsID contract wrapper.
func NewFrostFSID(cfg FrostFSIDConfig) *FrostFSID {
ffsid := &FrostFSID{
morphClient: cfg.Client,
contractHash: cfg.ContractHash,
}
return ffsid
}
func (f *FrostFSID) Users() ([]util.Uint160, error) {
var res []util.Uint160
err := f.requestWithRetryOnConnectionLost(func() error {
return f.morphClient.Client().TestInvokeIterator(func(item stackitem.Item) error {
raw, err := item.TryBytes()
if err != nil {
return fmt.Errorf("try item bytes: %w", err)
}
userHash, err := util.Uint160DecodeBytesBE(raw)
if err != nil {
return fmt.Errorf("decode user hash: %w", err)
}
res = append(res, userHash)
return nil
}, 100, f.contractHash, "listSubjects")
})
return res, err
}
func (f *FrostFSID) UserKey(hash util.Uint160) (*keys.PublicKey, error) {
var res *client.Subject
err := f.requestWithRetryOnConnectionLost(func() error {
resItems, err := f.morphClient.Client().TestInvoke(f.contractHash, "getSubject", hash)
if err != nil {
return fmt.Errorf("invoke getSubject: %w", err)
}
if len(resItems) != 1 {
return fmt.Errorf("length getSubject stack unexpected: %d", len(resItems))
}
arr, ok := resItems[0].Value().([]stackitem.Item)
if !ok {
return errors.New("not an array")
}
if res, err = client.ParseSubject(arr); err != nil {
return fmt.Errorf("parse subject: %w", err)
}
return nil
})
if err != nil {
return nil, err
}
return res.PrimaryKey, nil
}
func (f *FrostFSID) requestWithRetryOnConnectionLost(fn func() error) error {
err := fn()
if err == nil {
return nil
}
if !errors.Is(err, morphclient.ErrConnectionLost) {
return err
}
return fn()
}
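
A usage sketch combining both calls, assuming the same internal/contract import path; each contract call is retried once by requestWithRetryOnConnectionLost if the morph connection was lost in between:

package sketch

import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/contract" // assumed import path
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// fetchUserKeys lists all FrostfsID subjects and resolves each primary key.
func fetchUserKeys(cli *morph.Client, frostfsidHash util.Uint160) (map[util.Uint160]*keys.PublicKey, error) {
	ffsid := contract.NewFrostFSID(contract.FrostFSIDConfig{
		Client:       cli,
		ContractHash: frostfsidHash,
	})

	users, err := ffsid.Users()
	if err != nil {
		return nil, err
	}

	res := make(map[util.Uint160]*keys.PublicKey, len(users))
	for _, u := range users {
		key, err := ffsid.UserKey(u)
		if err != nil {
			return nil, err
		}
		res[u] = key
	}
	return res, nil
}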

View file

@ -1,71 +0,0 @@
package notificator
import (
"context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
"go.uber.org/zap"
)
// handlerLimiter serializes pieces of work
// and interrupts the previous one when a new one is submitted.
type handlerLimiter struct {
ctx context.Context
log *zap.Logger
handler NewEpochHandler
work chan func()
mu sync.Mutex
cancelCurrent context.CancelFunc
}
func newHandlerLimiter(ctx context.Context, handler NewEpochHandler, log *zap.Logger) *handlerLimiter {
hl := &handlerLimiter{
ctx: ctx,
log: log,
handler: handler,
work: make(chan func()),
cancelCurrent: func() {},
}
go hl.start(ctx)
return hl
}
func (h *handlerLimiter) start(ctx context.Context) {
for {
select {
case <-ctx.Done():
close(h.work)
return
case work := <-h.work:
work()
}
}
}
func (h *handlerLimiter) replaceCurrentWorkContext(ctx context.Context) (workCtx context.Context) {
h.mu.Lock()
defer h.mu.Unlock()
h.cancelCurrent()
workCtx, h.cancelCurrent = context.WithCancel(ctx)
return workCtx
}
func (h *handlerLimiter) Handler(e event.Event) {
ee, ok := e.(NewEpochEvent)
if !ok {
return
}
workCtx := h.replaceCurrentWorkContext(h.ctx)
h.log.Debug(logs.NewEpochWasTriggered, zap.Uint64("epoch", ee.Epoch))
h.work <- func() {
h.handler(workCtx, ee)
}
}

View file

@ -1,154 +0,0 @@
package notificator
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"go.uber.org/zap"
)
type ListenerImpl struct {
client *morph.Client
log *zap.Logger
reconnectInterval time.Duration
parser event.NotificationParserInfo
handler event.NotificationHandlerInfo
blockNumber atomic.Uint32
once sync.Once
mu sync.RWMutex
listener event.Listener
}
type ConfigListener struct {
Client *morph.Client
Logger *zap.Logger
ReconnectInterval time.Duration
Parser event.NotificationParserInfo
Handler event.NotificationHandlerInfo
}
var _ Listener = (*ListenerImpl)(nil)
func NewListener(ctx context.Context, cfg ConfigListener) (*ListenerImpl, error) {
l := &ListenerImpl{
client: cfg.Client,
log: cfg.Logger,
reconnectInterval: cfg.ReconnectInterval,
parser: cfg.Parser,
handler: cfg.Handler,
}
if err := l.initNewListener(ctx); err != nil {
return nil, err
}
return l, nil
}
func (l *ListenerImpl) Listen(ctx context.Context) {
l.once.Do(func() {
l.setParsersAndHandlers()
go l.currentListener().Listen(ctx)
l.reconnectRoutine(ctx)
})
}
func (l *ListenerImpl) reconnectRoutine(ctx context.Context) {
ticker := time.NewTicker(l.reconnectInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
l.log.Info(logs.ListenerStopped, zap.Error(ctx.Err()))
return
case <-l.client.ReconnectionChannel():
LOOP:
for {
select {
case <-ctx.Done():
l.log.Info(logs.ListenerStopped, zap.Error(ctx.Err()))
return
case <-ticker.C:
l.log.Info(logs.ListenerReconnection)
if err := l.initNewListener(ctx); err != nil {
l.log.Error(logs.ListenerCouldntBeReconnected, zap.Error(err))
ticker.Reset(l.reconnectInterval)
continue
}
l.setParsersAndHandlers()
go l.currentListener().Listen(ctx)
break LOOP
}
}
}
}
}
func (l *ListenerImpl) initNewListener(ctx context.Context) error {
currentBlock, err := l.client.Client().BlockCount()
if err != nil {
return fmt.Errorf("get block count: %w", err)
}
latestBlock := l.blockNumber.Load()
if currentBlock > latestBlock {
latestBlock = currentBlock
}
morphLogger := &logger.Logger{Logger: l.log}
subs, err := subscriber.New(ctx, &subscriber.Params{
Log: morphLogger,
StartFromBlock: latestBlock,
Client: l.client.Client(),
})
if err != nil {
return fmt.Errorf("create subscriber: %w", err)
}
ln, err := event.NewListener(event.ListenerParams{
Logger: morphLogger,
Subscriber: subs,
WorkerPoolCapacity: 0, // 0 means "infinite"
})
if err != nil {
return err
}
l.mu.Lock()
l.listener = ln
l.mu.Unlock()
return nil
}
func (l *ListenerImpl) currentListener() event.Listener {
l.mu.RLock()
defer l.mu.RUnlock()
return l.listener
}
func (l *ListenerImpl) setParsersAndHandlers() {
l.mu.RLock()
defer l.mu.RUnlock()
l.listener.SetNotificationParser(l.parser)
l.listener.RegisterNotificationHandler(l.handler)
l.listener.RegisterBlockHandler(l.blockHandler)
}
func (l *ListenerImpl) blockHandler(block *block.Block) {
l.blockNumber.Store(block.Index)
}

View file

@ -1,120 +0,0 @@
package notificator
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"go.uber.org/zap"
)
type NewEpochHandler func(ctx context.Context, ee NewEpochEvent)
type NewEpochEvent struct {
Epoch uint64
}
func (n NewEpochEvent) MorphEvent() {}
type Listener interface {
// Listen must start the event listener.
//
// Must listen to events with the parser installed.
Listen(context.Context)
}
type ListenerConfig struct {
Parser event.NotificationParserInfo
Handler event.NotificationHandlerInfo
}
type Notificator struct {
logger *zap.Logger
listener Listener
handler *handlerLimiter
}
type Config struct {
Handler NewEpochHandler
Logger *zap.Logger
NewListenerFn func(ListenerConfig) (Listener, error)
NetmapContract util.Uint160
}
const newEpochEventType = event.Type("NewEpoch")
func New(ctx context.Context, cfg Config) (*Notificator, error) {
notifier := &Notificator{
handler: newHandlerLimiter(ctx, cfg.Handler, cfg.Logger),
logger: cfg.Logger,
}
var npi event.NotificationParserInfo
npi.SetScriptHash(cfg.NetmapContract)
npi.SetType(newEpochEventType)
npi.SetParser(newEpochEventParser())
var nhi event.NotificationHandlerInfo
nhi.SetType(newEpochEventType)
nhi.SetScriptHash(cfg.NetmapContract)
nhi.SetHandler(notifier.handler.Handler)
ln, err := cfg.NewListenerFn(ListenerConfig{
Parser: npi,
Handler: nhi,
})
if err != nil {
return nil, fmt.Errorf("create new listener: %w", err)
}
notifier.listener = ln
return notifier, nil
}
// Start runs listener to process notifications.
// The method MUST be invoked exactly once after successful initialization with New;
// otherwise a panic can happen.
func (n *Notificator) Start(ctx context.Context) {
n.listener.Listen(ctx)
}
func newEpochEventParser() event.NotificationParser {
return func(ne *state.ContainedNotificationEvent) (event.Event, error) {
arr, err := arrayFromStackItem(ne.Item)
if err != nil {
return nil, fmt.Errorf("notification event item is invalid: %w", err)
}
if len(arr) != 1 {
return nil, fmt.Errorf("notification event item array has invalid length: %d", len(arr))
}
epoch, err := arr[0].TryInteger()
if err != nil {
return nil, err
}
return NewEpochEvent{Epoch: epoch.Uint64()}, nil
}
}
// arrayFromStackItem returns the slice of stack items carried by the passed parameter.
// If the passed parameter carries a boolean false value, (nil, nil) is returned.
func arrayFromStackItem(param stackitem.Item) ([]stackitem.Item, error) {
switch param.Type() {
case stackitem.AnyT:
return nil, nil
case stackitem.ArrayT, stackitem.StructT:
items, ok := param.Value().([]stackitem.Item)
if !ok {
return nil, fmt.Errorf("can't convert %T to parameter slice", param.Value())
}
return items, nil
default:
return nil, fmt.Errorf("%s is not an array type", param.Type())
}
}
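
A wiring sketch, assuming the reconnecting morph client and the ListenerImpl above; NewListenerFn lets New inject its NewEpoch parser and handler into the concrete listener:

package sketch

import (
	"context"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/notificator" // assumed import path
	"github.com/nspcc-dev/neo-go/pkg/util"
	"go.uber.org/zap"
)

func startNotificator(ctx context.Context, cli *morph.Client, netmapContract util.Uint160, log *zap.Logger) error {
	handler := func(ctx context.Context, ee notificator.NewEpochEvent) {
		// Long-running epoch processing; ctx is canceled as soon as the
		// limiter receives the next NewEpoch event.
		log.Info("new epoch", zap.Uint64("epoch", ee.Epoch))
	}

	n, err := notificator.New(ctx, notificator.Config{
		Handler:        handler,
		Logger:         log,
		NetmapContract: netmapContract,
		NewListenerFn: func(lc notificator.ListenerConfig) (notificator.Listener, error) {
			return notificator.NewListener(ctx, notificator.ConfigListener{
				Client:            cli,
				Logger:            log,
				ReconnectInterval: 30 * time.Second,
				Parser:            lc.Parser,
				Handler:           lc.Handler,
			})
		},
	})
	if err != nil {
		return err
	}

	go n.Start(ctx) // invoke exactly once; listens until ctx is canceled
	return nil
}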

View file

@ -1,124 +0,0 @@
package notificator
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type scriptHashWithType struct {
eventType event.Type
contractHash util.Uint160
}
type listenerMock struct {
scriptHashWithType
parser event.NotificationParserInfo
handler event.NotificationHandlerInfo
}
func newListenerMock(hash util.Uint160) *listenerMock {
return &listenerMock{
scriptHashWithType: scriptHashWithType{
eventType: newEpochEventType,
contractHash: hash,
},
}
}
func (l *listenerMock) sendNotification(epochEvent NewEpochEvent) error {
l.handler.Handler()(epochEvent)
return nil
}
func (l *listenerMock) Listen(context.Context) {}
func TestNotificatorBase(t *testing.T) {
ctx := context.Background()
logger := zaptest.NewLogger(t)
contractHash, err := util.Uint160DecodeStringLE("a0520ef5e7b9dd89ba49cce9cac1a6332d3facc0")
require.NoError(t, err)
var sequentialHandlerFlag atomic.Bool
var gotEvent NewEpochEvent
var wg sync.WaitGroup
handler := func(_ context.Context, ee NewEpochEvent) {
require.False(t, sequentialHandlerFlag.Load())
sequentialHandlerFlag.Store(true)
gotEvent = ee
time.Sleep(time.Second)
sequentialHandlerFlag.Store(false)
wg.Done()
}
lnMock := newListenerMock(contractHash)
cfg := Config{
Handler: handler,
Logger: logger,
NetmapContract: contractHash,
NewListenerFn: func(config ListenerConfig) (Listener, error) {
lnMock.parser = config.Parser
lnMock.handler = config.Handler
return lnMock, nil
},
}
n, err := New(ctx, cfg)
require.NoError(t, err)
go n.Start(ctx)
ee := NewEpochEvent{Epoch: 1}
sendNotification(t, lnMock, ee, &wg)
require.Equal(t, ee.Epoch, gotEvent.Epoch)
ee = NewEpochEvent{Epoch: 2}
sendNotification(t, lnMock, ee, &wg)
require.Equal(t, ee.Epoch, gotEvent.Epoch)
}
func sendNotification(t *testing.T, lnMock *listenerMock, ee NewEpochEvent, wg *sync.WaitGroup) {
wg.Add(1)
err := lnMock.sendNotification(ee)
require.NoError(t, err)
wg.Wait()
}
func TestLimiter(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
log := zaptest.NewLogger(t)
var (
interrupted atomic.Bool
wg sync.WaitGroup
)
wg.Add(1)
handler := NewEpochHandler(func(ctx context.Context, ee NewEpochEvent) {
defer wg.Done()
select {
case <-ctx.Done():
interrupted.Store(true)
case <-time.After(3 * time.Second):
log.Warn("handler executed successfully", zap.Any("event", ee))
}
})
hl := newHandlerLimiter(ctx, handler, log)
hl.Handler(NewEpochEvent{Epoch: 1})
cancel()
wg.Wait()
require.True(t, interrupted.Load())
}

View file

@ -1,68 +0,0 @@
package resolver
import (
"errors"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
// ResolveContractHash determines the contract hash by resolving an NNS name.
func ResolveContractHash(cli *morph.Client, contractHash string) (util.Uint160, error) {
if hash, err := util.Uint160DecodeStringLE(contractHash); err == nil {
return hash, nil
}
if strings.Count(contractHash, ".") != 1 {
return util.Uint160{}, fmt.Errorf("invalid contract name: '%s'", contractHash)
}
nnsContract, err := cli.Client().NNSHash()
if err != nil {
return util.Uint160{}, fmt.Errorf("get NNS contract hash: %w", err)
}
return invokeResolve(cli, nnsContract, contractHash)
}
func invokeResolve(cli *morph.Client, nnsContract util.Uint160, contractHash string) (util.Uint160, error) {
items, err := cli.Client().TestInvoke(nnsContract, "resolve", contractHash, int64(nns.TXT))
if err != nil {
return util.Uint160{}, fmt.Errorf("contract invocation: %w", err)
}
if len(items) != 1 {
return util.Uint160{}, fmt.Errorf("stack has %d items", len(items))
}
if _, ok := items[0].(stackitem.Null); !ok {
arr, ok := items[0].Value().([]stackitem.Item)
if !ok {
// unexpected for types from stackitem package
return util.Uint160{}, errors.New("invalid cast to stack item slice")
}
for i := range arr {
recordValue, err := arr[i].TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("convert array item to byte slice: %w", err)
}
strRecordValue := string(recordValue)
scriptHash, err := address.StringToUint160(strRecordValue)
if err == nil {
return scriptHash, nil
}
scriptHash, err = util.Uint160DecodeStringLE(strRecordValue)
if err == nil {
return scriptHash, nil
}
}
}
return util.Uint160{}, errors.New("record not found")
}
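
A usage sketch covering both accepted forms, a raw little-endian script hash and a two-label NNS name; the concrete hash and name here are examples only:

package sketch

import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/morph"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/internal/resolver" // assumed import path
	"github.com/nspcc-dev/neo-go/pkg/util"
)

func resolveContracts(cli *morph.Client) (frostfsid, container util.Uint160, err error) {
	// A raw little-endian script hash is decoded directly; NNS is not queried.
	frostfsid, err = resolver.ResolveContractHash(cli, "a0520ef5e7b9dd89ba49cce9cac1a6332d3facc0")
	if err != nil {
		return
	}

	// A name with exactly one dot goes through the NNS "resolve" call and
	// accepts either an address-encoded or a hex-encoded TXT record.
	container, err = resolver.ResolveContractHash(cli, "container.frostfs") // example name
	return
}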