forked from TrueCloudLab/frostfs-node
WIP: Morph: Add unit tests #2
538 changed files with 12747 additions and 7395 deletions
@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.20 as builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -1,4 +1,4 @@
-FROM golang:1.19
+FROM golang:1.20
 
 WORKDIR /tmp
 

@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.20 as builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.20 as builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -1,4 +1,4 @@
-FROM golang:1.18 as builder
+FROM golang:1.20 as builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -1,19 +0,0 @@
-FROM golang:1.18 as builder
-ARG BUILD=now
-ARG VERSION=dev
-ARG REPO=repository
-WORKDIR /src
-COPY . /src
-
-RUN make bin/frostfs-node
-
-# Executable image
-FROM alpine AS frostfs-node
-RUN apk add --no-cache bash
-
-WORKDIR /
-
-COPY --from=builder /src/bin/frostfs-node /bin/frostfs-node
-COPY --from=builder /src/config/testnet/config.yml /config.yml
-
-CMD ["frostfs-node", "--config", "/config.yml"]
.forgejo/workflows/build.yml (new file, 38 lines)

@@ -0,0 +1,38 @@
+name: Build
+
+on: [pull_request]
+
+jobs:
+  build:
+    name: Build Components
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        go_versions: [ '1.19', '1.20' ]
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          # Allows to fetch all history for all branches and tags.
+          # Need this for proper versioning.
+          fetch-depth: 0
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '${{ matrix.go_versions }}'
+
+      - name: Build CLI
+        run: make bin/frostfs-cli
+
+      - name: Build NODE
+        run: make bin/frostfs-node
+
+      - name: Build IR
+        run: make bin/frostfs-ir
+
+      - name: Build ADM
+        run: make bin/frostfs-adm
+
+      - name: Build LENS
+        run: make bin/frostfs-lens
.forgejo/workflows/tests.yml (new file, 72 lines)

@@ -0,0 +1,72 @@
+name: Tests and linters
+on: [pull_request]
+
+jobs:
+  lint:
+    name: Lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.20'
+          cache: true
+
+      - name: golangci-lint
+        uses: https://github.com/golangci/golangci-lint-action@v3
+        with:
+          version: latest
+
+  tests:
+    name: Tests
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        go_versions: [ '1.19', '1.20' ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '${{ matrix.go_versions }}'
+          cache: true
+
+      - name: Run tests
+        run: make test
+
+  tests-race:
+    name: Tests with -race
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.20'
+          cache: true
+
+      - name: Run tests
+        run: go test ./... -count=1 -race
+
+  staticcheck:
+    name: Staticcheck
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.20'
+          cache: true
+
+      - name: Install staticcheck
+        run: make staticcheck-install
+
+      - name: Run staticcheck
+        run: make staticcheck-run
.forgejo/workflows/vulncheck.yml (new file, 22 lines)

@@ -0,0 +1,22 @@
+name: Vulncheck
+on: [pull_request]
+
+jobs:
+  vulncheck:
+    name: Vulncheck
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.20'
+
+      - name: Install govulncheck
+        run: go install golang.org/x/vuln/cmd/govulncheck@latest
+
+      - name: Run govulncheck
+        run: govulncheck ./...
@@ -4,7 +4,7 @@
 # options for analysis running
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 10m
+  timeout: 20m

   # include test files or not, default is true
   tests: false

@@ -31,6 +31,12 @@ linters-settings:
     statements: 60 # default 40
   gocognit:
     min-complexity: 40 # default 30
+  importas:
+    no-unaliased: true
+    no-extra-aliases: false
+    alias:
+      - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+        alias: objectSDK

 linters:
   enable:

@@ -62,5 +68,6 @@ linters:
    - funlen
    - gocognit
    - contextcheck
+   - importas
  disable-all: true
  fast: false
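With the `importas` settings above, golangci-lint now rejects unaliased imports of the object SDK package, which is why the imports in the CLI's internal client later in this diff are renamed to `objectSDK`:

```go
// Flagged by importas (no-unaliased: true):
//   import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
// Required alias:
import objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
```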
CHANGELOG.md (27 lines changed)

@@ -6,17 +6,44 @@ Changelog for FrostFS Node
### Added
- Support impersonate bearer token (#229)
- Change log level on SIGHUP for ir (#125)
- Reload pprof and metrics on SIGHUP for ir (#125)
- Support copies number parameter in `frostfs-cli object put` (#351)
- Set extra wallets on SIGHUP for ir (#125)
- Writecache metrics (#312)
- Add tree service metrics (#370)

### Changed
- `frostfs-cli util locode generate` is now much faster (#309)
### Fixed
- Take network settings into account during netmap contract update (#100)
- Read config files from dir even if config file not provided via `--config` for node (#238)
- Notary requests parsing according to `neo-go`'s updates (#268)
- Tree service panic in its internal client cache (#322)
- Iterate over endpoints when create ws client in morph's constructor (#304)
- Delete complex objects with GC (#332)

### Removed
### Updated
- `neo-go` to `v0.101.1`
- `google.golang.org/grpc` to `v1.55.0`
- `paulmach/orb` to `v0.9.2`
- `go.etcd.io/bbolt` to `v1.3.7`
- `github.com/nats-io/nats.go` to `v1.25.0`
- `golang.org/x/sync` to `v0.2.0`
- `golang.org/x/term` to `v0.8.0`
- `github.com/spf13/cobra` to `v1.7.0`
- `github.com/panjf2000/ants/v2` to `v2.7.4`
- `github.com/multiformats/go-multiaddr` to `v0.9.0`
- `github.com/hashicorp/golang-lru/v2` to `v2.0.2`
- `go.uber.org/atomic` to `v1.11.0`
- Minimum go version to v1.19
- `github.com/prometheus/client_golang` to `v1.15.1`
- `github.com/prometheus/client_model` to `v0.4.0`
- `go.opentelemetry.io/otel` to `v1.15.1`
- `go.opentelemetry.io/otel/trace` to `v1.15.1`
- `github.com/spf13/cast` to `v1.5.1`
- `git.frostfs.info/TrueCloudLab/hrw` to `v1.2.1`

### Updating from v0.36.0

## [v0.36.0] - 2023-04-12 - Furtwängler
@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:

-- Check the open [issues](https://github.com/TrueCloudLab/frostfs-node/issues) and
-  [pull requests](https://github.com/TrueCloudLab/frostfs-node/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-node/issues) and
+  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-node/pulls) for existing
   discussions.

 - Open an issue first, to discuss a new feature or enhancement.

@@ -27,19 +27,19 @@ Start by forking the `frostfs-node` repository, make changes in a branch and the
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:

-### Set up your GitHub Repository
-Fork [FrostFS node upstream](https://github.com/TrueCloudLab/frostfs-node/fork) source
+### Set up your Forgejo repository
+Fork [FrostFS node upstream](https://git.frostfs.info/TrueCloudLab/frostfs-node) source
 repository to your own personal repository. Copy the URL of your fork (you will
 need it for the `git clone` command below).

 ```sh
-$ git clone https://github.com/TrueCloudLab/frostfs-node
+$ git clone https://git.frostfs.info/TrueCloudLab/frostfs-node
 ```

 ### Set up git remote as ``upstream``
 ```sh
 $ cd frostfs-node
-$ git remote add upstream https://github.com/TrueCloudLab/frostfs-node
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-node
 $ git fetch upstream
 $ git merge upstream/master
 ...

@@ -58,7 +58,7 @@ $ git checkout -b feature/123-something_awesome
 After your code changes, make sure

 - To add test cases for the new code.
-- To run `make lint`
+- To run `make lint` and `make staticcheck-run`
 - To squash your commits into a single commit or a series of logically separated
   commits run `git rebase -i`. It's okay to force update your pull request.
 - To run `make test` and `make all` completes.

@@ -89,8 +89,8 @@ $ git push origin feature/123-something_awesome
 ```

 ### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
-document](https://help.github.com/articles/creating-a-pull-request/) for
+Pull requests can be created via Forgejo. Refer to [this
+document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.
Makefile (10 lines changed)

@@ -95,7 +95,7 @@ image-%:
 		-t $(HUB_IMAGE)-$*:$(HUB_TAG) .

 # Build all Docker images
-images: image-storage image-ir image-cli image-adm image-storage-testnet
+images: image-storage image-ir image-cli image-adm

 # Build dirty local Docker images
 dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm

@@ -126,7 +126,7 @@ imports:
 # Run Unit Test with go test
 test:
 	@echo "⇒ Running go test"
-	@go test ./...
+	@go test ./... -count=1

 pre-commit-run:
 	@pre-commit run -a --hook-stage manual

@@ -135,8 +135,12 @@ pre-commit-run:
 lint:
 	@golangci-lint --timeout=5m run

+# Install staticcheck
+staticcheck-install:
+	@go install honnef.co/go/tools/cmd/staticcheck@latest
+
 # Run staticcheck
-staticcheck:
+staticcheck-run:
 	@staticcheck ./...

 # Run linters in Docker
@@ -49,7 +49,7 @@ The latest version of frostfs-node works with frostfs-contract

 # Building

-To make all binaries you need Go 1.18+ and `make`:
+To make all binaries you need Go 1.19+ and `make`:
 ```
 make all
 ```
@@ -18,6 +18,7 @@ To start a network, you need a set of consensus nodes, the same number of
 Alphabet nodes and any number of Storage nodes. While the number of Storage
 nodes can be scaled almost infinitely, the number of consensus and Alphabet
 nodes can't be changed so easily right now. Consider this before going any further.
+Note also that there is an upper limit on the number of alphabet nodes (currently 22).

 It is easier to use `frostfs-adm` with a predefined configuration. First, create
 a network configuration file. In this example, there is going to be only one
@@ -37,11 +37,6 @@ const (
 	dumpBalancesAlphabetFlag      = "alphabet"
 	dumpBalancesProxyFlag         = "proxy"
 	dumpBalancesUseScriptHashFlag = "script-hash"
-
-	// notaryEnabled signifies whether contracts were deployed in a notary-enabled environment.
-	// The setting is here to simplify testing and building the command for testnet (notary currently disabled).
-	// It will be removed eventually.
-	notaryEnabled = true
 )

 func dumpBalances(cmd *cobra.Command, _ []string) error {

@@ -60,7 +55,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {

 	inv := invoker.New(c, nil)

-	if !notaryEnabled || dumpStorage || dumpAlphabet || dumpProxy {
+	if dumpStorage || dumpAlphabet || dumpProxy {
 		nnsCs, err = c.GetContractStateByID(1)
 		if err != nil {
 			return fmt.Errorf("can't get NNS contract info: %w", err)

@@ -72,7 +67,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
 		}
 	}

-	irList, err := fetchIRNodes(c, nmHash, rolemgmt.Hash)
+	irList, err := fetchIRNodes(c, rolemgmt.Hash)
 	if err != nil {
 		return err
 	}

@@ -187,40 +182,22 @@ func printAlphabetContractBalances(cmd *cobra.Command, c Client, inv *invoker.In
 	return nil
 }

-func fetchIRNodes(c Client, nmHash, desigHash util.Uint160) ([]accBalancePair, error) {
-	var irList []accBalancePair
-
+func fetchIRNodes(c Client, desigHash util.Uint160) ([]accBalancePair, error) {
 	inv := invoker.New(c, nil)

-	if notaryEnabled {
-		height, err := c.GetBlockCount()
-		if err != nil {
-			return nil, fmt.Errorf("can't get block height: %w", err)
-		}
+	height, err := c.GetBlockCount()
+	if err != nil {
+		return nil, fmt.Errorf("can't get block height: %w", err)
+	}

-		arr, err := getDesignatedByRole(inv, desigHash, noderoles.NeoFSAlphabet, height)
-		if err != nil {
-			return nil, errors.New("can't fetch list of IR nodes from the netmap contract")
-		}
+	arr, err := getDesignatedByRole(inv, desigHash, noderoles.NeoFSAlphabet, height)
+	if err != nil {
+		return nil, errors.New("can't fetch list of IR nodes from the netmap contract")
+	}

-		irList = make([]accBalancePair, len(arr))
-		for i := range arr {
-			irList[i].scriptHash = arr[i].GetScriptHash()
-		}
-	} else {
-		arr, err := unwrap.ArrayOfBytes(inv.Call(nmHash, "innerRingList"))
-		if err != nil {
-			return nil, errors.New("can't fetch list of IR nodes from the netmap contract")
-		}
-
-		irList = make([]accBalancePair, len(arr))
-		for i := range arr {
-			pub, err := keys.NewPublicKeyFromBytes(arr[i], elliptic.P256())
-			if err != nil {
-				return nil, fmt.Errorf("can't parse IR node public key: %w", err)
-			}
-			irList[i].scriptHash = pub.GetScriptHash()
-		}
+	irList := make([]accBalancePair, len(arr))
+	for i := range arr {
+		irList[i].scriptHash = arr[i].GetScriptHash()
 	}
 	return irList, nil
 }
@@ -21,6 +21,7 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
+	"golang.org/x/sync/errgroup"
 )

 const (

@@ -38,6 +39,9 @@ func generateAlphabetCreds(cmd *cobra.Command, _ []string) error {
 	if size == 0 {
 		return errors.New("size must be > 0")
 	}
+	if size > maxAlphabetNodes {
+		return ErrTooManyAlphabetNodes
+	}

 	v := viper.GetViper()
 	walletDir := config.ResolveHomePath(viper.GetString(alphabetWalletsFlag))

@@ -92,28 +96,32 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
 		pubs[i] = w.Accounts[0].PrivateKey().PublicKey()
 	}

+	var errG errgroup.Group
+
 	// Create committee account with N/2+1 multi-signature.
 	majCount := smartcontract.GetMajorityHonestNodeCount(size)
-	for i, w := range wallets {
-		if err := addMultisigAccount(w, majCount, committeeAccountName, passwords[i], pubs); err != nil {
-			return nil, fmt.Errorf("can't create committee account: %w", err)
-		}
-	}
-
 	// Create consensus account with 2*N/3+1 multi-signature.
 	bftCount := smartcontract.GetDefaultHonestNodeCount(size)
-	for i, w := range wallets {
-		if err := addMultisigAccount(w, bftCount, consensusAccountName, passwords[i], pubs); err != nil {
-			return nil, fmt.Errorf("can't create consensus account: %w", err)
-		}
+	for i := range wallets {
+		i := i
+		ps := make(keys.PublicKeys, len(pubs))
+		copy(ps, pubs)
+		errG.Go(func() error {
+			if err := addMultisigAccount(wallets[i], majCount, committeeAccountName, passwords[i], ps); err != nil {
+				return fmt.Errorf("can't create committee account: %w", err)
+			}
+			if err := addMultisigAccount(wallets[i], bftCount, consensusAccountName, passwords[i], ps); err != nil {
+				return fmt.Errorf("can't create consensus account: %w", err)
+			}
+			if err := wallets[i].SavePretty(); err != nil {
+				return fmt.Errorf("can't save wallet: %w", err)
+			}
+			return nil
+		})
 	}

-	for _, w := range wallets {
-		if err := w.SavePretty(); err != nil {
-			return nil, fmt.Errorf("can't save wallet: %w", err)
-		}
+	if err := errG.Wait(); err != nil {
+		return nil, err
 	}

 	return passwords, nil
 }
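The rewritten `initializeWallets` fans the per-wallet work out with `golang.org/x/sync/errgroup`; each goroutine also gets a private copy of the shared public-key slice, avoiding races on it. A minimal standalone sketch of the errgroup fan-out (names are illustrative, not the PR's code):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func process(name string) error {
	// Stand-in for the per-wallet work (multisig accounts + save).
	fmt.Println("processing", name)
	return nil
}

func main() {
	wallets := []string{"az", "buky", "vedi"}

	var g errgroup.Group
	for i := range wallets {
		i := i // capture the loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			return process(wallets[i])
		})
	}
	// Wait blocks until all goroutines finish and returns the first
	// non-nil error from any of them.
	if err := g.Wait(); err != nil {
		fmt.Println("failed:", err)
	}
}
```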
@@ -7,6 +7,7 @@ import (
 	"os"
 	"path/filepath"
 	"strconv"
+	"sync"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"

@@ -71,24 +72,31 @@ func TestGenerateAlphabet(t *testing.T) {
 	buf.WriteString(testContractPassword + "\r")
 	require.NoError(t, generateAlphabetCreds(generateAlphabetCmd, nil))

+	var wg sync.WaitGroup
 	for i := uint64(0); i < size; i++ {
-		p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
-		w, err := wallet.NewWalletFromFile(p)
-		require.NoError(t, err, "wallet doesn't exist")
-		require.Equal(t, 3, len(w.Accounts), "not all accounts were created")
-		for _, a := range w.Accounts {
-			err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams())
-			require.NoError(t, err, "can't decrypt account")
-			switch a.Label {
-			case consensusAccountName:
-				require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters))
-			case committeeAccountName:
-				require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters))
-			default:
-				require.Equal(t, singleAccountName, a.Label)
+		i := i
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
+			w, err := wallet.NewWalletFromFile(p)
+			require.NoError(t, err, "wallet doesn't exist")
+			require.Equal(t, 3, len(w.Accounts), "not all accounts were created")
+
+			for _, a := range w.Accounts {
+				err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams())
+				require.NoError(t, err, "can't decrypt account")
+				switch a.Label {
+				case consensusAccountName:
+					require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters))
+				case committeeAccountName:
+					require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters))
+				default:
+					require.Equal(t, singleAccountName, a.Label)
+				}
 			}
-		}
+		}()
 	}
+	wg.Wait()

 	t.Run("check contract group wallet", func(t *testing.T) {
 		p := filepath.Join(walletDir, contractWalletFilename)
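Both this test and the wallet-generation change above re-declare the loop variable (`i := i`) before starting a goroutine. Prior to Go 1.22, all iterations shared a single loop variable, so omitting the copy would let every goroutine observe the final value. A self-contained illustration of the pitfall and the fix:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	out := make([]int, 3)
	for i := 0; i < 3; i++ {
		i := i // under pre-Go 1.22 semantics, dropping this line lets goroutines read i == 3
		wg.Add(1)
		go func() {
			defer wg.Done()
			out[i] = i
		}()
	}
	wg.Wait()
	fmt.Println(out) // [0 1 2]
}
```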
@@ -23,6 +23,13 @@ import (
 	"github.com/spf13/viper"
 )

+const (
+	// maxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
+	// of the invocation script.
+	// See: https://github.com/nspcc-dev/neo-go/blob/740488f7f35e367eaa99a71c0a609c315fe2b0fc/pkg/core/transaction/witness.go#L10
+	maxAlphabetNodes = 22
+)
+
 type cache struct {
 	nnsCs    *state.Contract
 	groupKey *keys.PublicKey

@@ -45,6 +52,8 @@ type initializeContext struct {
 	ContractPath string
 }

+var ErrTooManyAlphabetNodes = fmt.Errorf("too many alphabet nodes (maximum allowed is %d)", maxAlphabetNodes)
+
 func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
 	initCtx, err := newInitializeContext(cmd, viper.GetViper())
 	if err != nil {

@@ -111,6 +120,10 @@ func newInitializeContext(cmd *cobra.Command, v *viper.Viper) (*initializeContex
 		return nil, err
 	}

+	if len(wallets) > maxAlphabetNodes {
+		return nil, ErrTooManyAlphabetNodes
+	}
+
 	needContracts := cmd.Name() == "update-contracts" || cmd.Name() == "init"

 	var w *wallet.Wallet
@@ -197,11 +210,11 @@ func validateInit(cmd *cobra.Command) error {
 func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) (Client, error) {
 	var c Client
 	var err error
-	if v.GetString(localDumpFlag) != "" {
-		if v.GetString(endpointFlag) != "" {
+	if ldf := cmd.Flags().Lookup(localDumpFlag); ldf != nil && ldf.Changed {
+		if cmd.Flags().Changed(endpointFlag) {
 			return nil, fmt.Errorf("`%s` and `%s` flags are mutually exclusive", endpointFlag, localDumpFlag)
 		}
-		c, err = newLocalClient(cmd, v, wallets)
+		c, err = newLocalClient(cmd, v, wallets, ldf.Value.String())
 	} else {
 		c, err = getN3Client(v)
 	}
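The `createClient` change switches from testing the flag's value to pflag's `Changed`, which distinguishes an explicitly passed (possibly empty) flag from an unset one. A minimal cobra sketch of the same mutual-exclusion check (flag names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, _ []string) error {
			// Changed reports whether the user set the flag explicitly,
			// which distinguishes `--local-dump ""` from an absent flag.
			if cmd.Flags().Changed("local-dump") && cmd.Flags().Changed("endpoint") {
				return fmt.Errorf("`endpoint` and `local-dump` flags are mutually exclusive")
			}
			return nil
		},
	}
	cmd.Flags().String("local-dump", "", "path to a chain dump file")
	cmd.Flags().String("endpoint", "", "RPC endpoint")
	_ = cmd.Execute()
}
```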
@@ -19,33 +19,24 @@ import (
 )

 // initialAlphabetNEOAmount represents the total amount of GAS distributed between alphabet nodes.
-const initialAlphabetNEOAmount = native.NEOTotalSupply
-
-func (c *initializeContext) registerCandidates() error {
-	neoHash := neo.Hash
-
-	cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neoHash, "getCandidates"))
-	if err != nil {
-		return fmt.Errorf("`getCandidates`: %w", err)
-	}
-
-	if len(cc) > 0 {
-		c.Command.Println("Candidates are already registered.")
-		return nil
-	}
+const (
+	initialAlphabetNEOAmount = native.NEOTotalSupply
+	registerBatchSize        = transaction.MaxAttributes - 1
+)

+func (c *initializeContext) registerCandidateRange(start, end int) error {
 	regPrice, err := c.getCandidateRegisterPrice()
 	if err != nil {
 		return fmt.Errorf("can't fetch registration price: %w", err)
 	}

 	w := io.NewBufBinWriter()
-	emit.AppCall(w.BinWriter, neoHash, "setRegisterPrice", callflag.States, 1)
-	for _, acc := range c.Accounts {
-		emit.AppCall(w.BinWriter, neoHash, "registerCandidate", callflag.States, acc.PrivateKey().PublicKey().Bytes())
+	emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, 1)
+	for _, acc := range c.Accounts[start:end] {
+		emit.AppCall(w.BinWriter, neo.Hash, "registerCandidate", callflag.States, acc.PrivateKey().PublicKey().Bytes())
 		emit.Opcodes(w.BinWriter, opcode.ASSERT)
 	}
-	emit.AppCall(w.BinWriter, neoHash, "setRegisterPrice", callflag.States, regPrice)
+	emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
 	if w.Err != nil {
 		panic(fmt.Sprintf("BUG: %v", w.Err))
 	}

@@ -54,14 +45,14 @@
 		Signer:  c.getSigner(false, c.CommitteeAcc),
 		Account: c.CommitteeAcc,
 	}}
-	for i := range c.Accounts {
+	for _, acc := range c.Accounts[start:end] {
 		signers = append(signers, rpcclient.SignerAccount{
 			Signer: transaction.Signer{
-				Account:          c.Accounts[i].Contract.ScriptHash(),
+				Account:          acc.Contract.ScriptHash(),
 				Scopes:           transaction.CustomContracts,
-				AllowedContracts: []util.Uint160{neoHash},
+				AllowedContracts: []util.Uint160{neo.Hash},
 			},
-			Account: c.Accounts[i],
+			Account: acc,
 		})
 	}

@@ -74,8 +65,8 @@
 	}

 	network := c.CommitteeAct.GetNetwork()
-	for i := range c.Accounts {
-		if err := c.Accounts[i].SignTx(network, tx); err != nil {
+	for _, acc := range c.Accounts[start:end] {
+		if err := acc.SignTx(network, tx); err != nil {
 			return fmt.Errorf("can't sign a transaction: %w", err)
 		}
 	}

@@ -83,6 +74,39 @@
 	return c.sendTx(tx, c.Command, true)
 }

+func (c *initializeContext) registerCandidates() error {
+	cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neo.Hash, "getCandidates"))
+	if err != nil {
+		return fmt.Errorf("`getCandidates`: %w", err)
+	}
+
+	need := len(c.Accounts)
+	have := len(cc)
+
+	if need == have {
+		c.Command.Println("Candidates are already registered.")
+		return nil
+	}
+
+	// Register candidates in batches in order to overcome the signers amount limit.
+	// See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27
+	for i := 0; i < need; i += registerBatchSize {
+		start, end := i, i+registerBatchSize
+		if end > need {
+			end = need
+		}
+		// This check is sound because transactions are accepted/rejected atomically.
+		if have >= end {
+			continue
+		}
+		if err := c.registerCandidateRange(start, end); err != nil {
+			return fmt.Errorf("registering candidates %d..%d: %q", start, end-1, err)
+		}
+	}
+
+	return nil
+}
+
 func (c *initializeContext) transferNEOToAlphabetContracts() error {
 	neoHash := neo.Hash
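The loop above is a plain fixed-size range split over the account list, with each `[start, end)` window registered by `registerCandidateRange` in its own transaction. A standalone sketch of the arithmetic (`batchRanges` is a hypothetical helper; assuming neo-go's `transaction.MaxAttributes` of 16, the batch size comes out to 15):

```go
package main

import "fmt"

// batchRanges splits n items into [start, end) windows of at most batchSize,
// mirroring the loop structure of registerCandidates above.
func batchRanges(n, batchSize int) [][2]int {
	var out [][2]int
	for i := 0; i < n; i += batchSize {
		end := i + batchSize
		if end > n {
			end = n
		}
		out = append(out, [2]int{i, end})
	}
	return out
}

func main() {
	// With a batch size of 15, the 22 alphabet nodes register
	// in two transactions.
	fmt.Println(batchRanges(22, 15)) // [[0 15] [15 22]]
}
```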
@@ -2,6 +2,7 @@ package morph

 import (
 	"encoding/hex"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strconv"

@@ -37,13 +38,22 @@ func TestInitialize(t *testing.T) {
 	t.Run("7 nodes", func(t *testing.T) {
 		testInitialize(t, 7)
 	})
 	t.Run("16 nodes", func(t *testing.T) {
 		testInitialize(t, 16)
 	})
+	t.Run("max nodes", func(t *testing.T) {
+		testInitialize(t, maxAlphabetNodes)
+	})
+	t.Run("too many nodes", func(t *testing.T) {
+		require.ErrorIs(t, generateTestData(t, t.TempDir(), maxAlphabetNodes+1), ErrTooManyAlphabetNodes)
+	})
 }

 func testInitialize(t *testing.T, committeeSize int) {
 	testdataDir := t.TempDir()
 	v := viper.GetViper()

-	generateTestData(t, testdataDir, committeeSize)
+	require.NoError(t, generateTestData(t, testdataDir, committeeSize))
 	v.Set(protoConfigPath, filepath.Join(testdataDir, protoFileName))

 	// Set to the path or remove the next statement to download from the network.

@@ -74,25 +84,33 @@
 	})
 }

-func generateTestData(t *testing.T, dir string, size int) {
+func generateTestData(t *testing.T, dir string, size int) error {
 	v := viper.GetViper()
 	v.Set(alphabetWalletsFlag, dir)

 	sizeStr := strconv.FormatUint(uint64(size), 10)
-	require.NoError(t, generateAlphabetCmd.Flags().Set(alphabetSizeFlag, sizeStr))
+	if err := generateAlphabetCmd.Flags().Set(alphabetSizeFlag, sizeStr); err != nil {
+		return err
+	}

 	setTestCredentials(v, size)
-	require.NoError(t, generateAlphabetCreds(generateAlphabetCmd, nil))
+	if err := generateAlphabetCreds(generateAlphabetCmd, nil); err != nil {
+		return err
+	}

 	var pubs []string
 	for i := 0; i < size; i++ {
 		p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
 		w, err := wallet.NewWalletFromFile(p)
-		require.NoError(t, err, "wallet doesn't exist")
+		if err != nil {
+			return fmt.Errorf("wallet doesn't exist: %w", err)
+		}
 		for _, acc := range w.Accounts {
 			if acc.Label == singleAccountName {
 				pub, ok := vm.ParseSignatureContract(acc.Contract.Script)
-				require.True(t, ok)
+				if !ok {
+					return fmt.Errorf("could not parse signature script for %s", acc.Address)
+				}
 				pubs = append(pubs, hex.EncodeToString(pub))
 				continue
 			}

@@ -101,16 +119,18 @@

 	cfg := config.Config{}
 	cfg.ProtocolConfiguration.Magic = 12345
-	cfg.ProtocolConfiguration.ValidatorsCount = size
+	cfg.ProtocolConfiguration.ValidatorsCount = uint32(size)
 	cfg.ProtocolConfiguration.TimePerBlock = time.Second
 	cfg.ProtocolConfiguration.StandbyCommittee = pubs // sorted by glagolic letters
 	cfg.ProtocolConfiguration.P2PSigExtensions = true
 	cfg.ProtocolConfiguration.VerifyTransactions = true
 	data, err := yaml.Marshal(cfg)
-	require.NoError(t, err)
+	if err != nil {
+		return err
+	}

 	protoPath := filepath.Join(dir, protoFileName)
-	require.NoError(t, os.WriteFile(protoPath, data, os.ModePerm))
+	return os.WriteFile(protoPath, data, os.ModePerm)
 }

 func setTestCredentials(v *viper.Viper, size int) {
@@ -51,7 +51,7 @@ type localClient struct {
 	maxGasInvoke int64
 }

-func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) (*localClient, error) {
+func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*localClient, error) {
 	cfg, err := config.LoadFile(v.GetString(protoConfigPath))
 	if err != nil {
 		return nil, err

@@ -62,7 +62,7 @@ func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
 		return nil, err
 	}

-	m := smartcontract.GetDefaultHonestNodeCount(cfg.ProtocolConfiguration.ValidatorsCount)
+	m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
 	accounts := make([]*wallet.Account, len(wallets))
 	for i := range accounts {
 		accounts[i], err = getWalletAccount(wallets[i], consensusAccountName)

@@ -87,7 +87,6 @@ func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet

 	go bc.Run()

-	dumpPath := v.GetString(localDumpFlag)
 	if cmd.Name() != "init" {
 		f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0600)
 		if err != nil {
@@ -77,7 +77,6 @@ var (
 		_ = viper.BindPFlag(containerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag))
 		_ = viper.BindPFlag(withdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag))
 		_ = viper.BindPFlag(protoConfigPath, cmd.Flags().Lookup(protoConfigPath))
-		_ = viper.BindPFlag(localDumpFlag, cmd.Flags().Lookup(localDumpFlag))
 	},
 	RunE: initializeSideChainCmd,
 }
@@ -13,7 +13,7 @@ import (
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
 )

@@ -37,8 +37,8 @@
 // BalanceOf requests the current balance of a FrostFS user.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func BalanceOf(prm BalanceOfPrm) (res BalanceOfRes, err error) {
-	res.cliRes, err = prm.cli.BalanceGet(context.Background(), prm.PrmBalanceGet)
+func BalanceOf(ctx context.Context, prm BalanceOfPrm) (res BalanceOfRes, err error) {
+	res.cliRes, err = prm.cli.BalanceGet(ctx, prm.PrmBalanceGet)

 	return
 }
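Every helper below changes the same way: `context.Context` becomes the first parameter and replaces an internal `context.Background()` call, so timeouts, cancellation, and the tracing span introduced later in this PR flow from the CLI command into each RPC. A self-contained sketch of the pattern (the `doRPC` name is hypothetical, not the PR's code):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Before the refactoring, a helper like this would call
// context.Background() internally, making the RPC uncancellable.
// Afterwards, the caller's context is threaded through instead.
func doRPC(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond): // stands in for a network call
		return nil
	case <-ctx.Done():
		return ctx.Err() // caller-driven cancellation or timeout
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(doRPC(ctx)) // <nil>
}
```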
@@ -62,8 +62,8 @@
 // ListContainers requests a list of FrostFS user's containers.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func ListContainers(prm ListContainersPrm) (res ListContainersRes, err error) {
-	res.cliRes, err = prm.cli.ContainerList(context.Background(), prm.PrmContainerList)
+func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContainersRes, err error) {
+	res.cliRes, err = prm.cli.ContainerList(ctx, prm.PrmContainerList)

 	return
 }

@@ -92,8 +92,8 @@
 // Success can be verified by reading by identifier.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func PutContainer(prm PutContainerPrm) (res PutContainerRes, err error) {
-	cliRes, err := prm.cli.ContainerPut(context.Background(), prm.PrmContainerPut)
+func PutContainer(ctx context.Context, prm PutContainerPrm) (res PutContainerRes, err error) {
+	cliRes, err := prm.cli.ContainerPut(ctx, prm.PrmContainerPut)
 	if err == nil {
 		res.cnr = cliRes.ID()
 	}
@@ -125,20 +125,20 @@
 // GetContainer reads a container from FrostFS by ID.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func GetContainer(prm GetContainerPrm) (res GetContainerRes, err error) {
-	res.cliRes, err = prm.cli.ContainerGet(context.Background(), prm.cliPrm)
+func GetContainer(ctx context.Context, prm GetContainerPrm) (res GetContainerRes, err error) {
+	res.cliRes, err = prm.cli.ContainerGet(ctx, prm.cliPrm)

 	return
 }

 // IsACLExtendable checks if ACL of the container referenced by the given identifier
 // can be extended. Client connection MUST BE correctly established in advance.
-func IsACLExtendable(c *client.Client, cnr cid.ID) (bool, error) {
+func IsACLExtendable(ctx context.Context, c *client.Client, cnr cid.ID) (bool, error) {
 	var prm GetContainerPrm
 	prm.SetClient(c)
 	prm.SetContainer(cnr)

-	res, err := GetContainer(prm)
+	res, err := GetContainer(ctx, prm)
 	if err != nil {
 		return false, fmt.Errorf("get container from the FrostFS: %w", err)
 	}
@@ -163,8 +163,8 @@
 // Success can be verified by reading by identifier.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func DeleteContainer(prm DeleteContainerPrm) (res DeleteContainerRes, err error) {
-	_, err = prm.cli.ContainerDelete(context.Background(), prm.PrmContainerDelete)
+func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteContainerRes, err error) {
+	_, err = prm.cli.ContainerDelete(ctx, prm.PrmContainerDelete)

 	return
 }

@@ -188,8 +188,8 @@
 // EACL reads eACL table from FrostFS by container ID.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func EACL(prm EACLPrm) (res EACLRes, err error) {
-	res.cliRes, err = prm.cli.ContainerEACL(context.Background(), prm.PrmContainerEACL)
+func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
+	res.cliRes, err = prm.cli.ContainerEACL(ctx, prm.PrmContainerEACL)

 	return
 }

@@ -211,8 +211,8 @@
 // Success can be verified by reading by container identifier.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func SetEACL(prm SetEACLPrm) (res SetEACLRes, err error) {
-	_, err = prm.cli.ContainerSetEACL(context.Background(), prm.PrmContainerSetEACL)
+func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) {
+	_, err = prm.cli.ContainerSetEACL(ctx, prm.PrmContainerSetEACL)

 	return
 }
@@ -236,8 +236,8 @@
 // NetworkInfo reads information about the FrostFS network.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func NetworkInfo(prm NetworkInfoPrm) (res NetworkInfoRes, err error) {
-	res.cliRes, err = prm.cli.NetworkInfo(context.Background(), prm.PrmNetworkInfo)
+func NetworkInfo(ctx context.Context, prm NetworkInfoPrm) (res NetworkInfoRes, err error) {
+	res.cliRes, err = prm.cli.NetworkInfo(ctx, prm.PrmNetworkInfo)

 	return
 }

@@ -266,8 +266,8 @@
 // NodeInfo requests information about the remote server from FrostFS netmap.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func NodeInfo(prm NodeInfoPrm) (res NodeInfoRes, err error) {
-	res.cliRes, err = prm.cli.EndpointInfo(context.Background(), prm.PrmEndpointInfo)
+func NodeInfo(ctx context.Context, prm NodeInfoPrm) (res NodeInfoRes, err error) {
+	res.cliRes, err = prm.cli.EndpointInfo(ctx, prm.PrmEndpointInfo)

 	return
 }

@@ -290,8 +290,8 @@
 // NetMapSnapshot requests current network view of the remote server.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func NetMapSnapshot(prm NetMapSnapshotPrm) (res NetMapSnapshotRes, err error) {
-	res.cliRes, err = prm.cli.NetMapSnapshot(context.Background(), client.PrmNetMapSnapshot{})
+func NetMapSnapshot(ctx context.Context, prm NetMapSnapshotPrm) (res NetMapSnapshotRes, err error) {
+	res.cliRes, err = prm.cli.NetMapSnapshot(ctx, client.PrmNetMapSnapshot{})
 	return
 }

@@ -319,8 +319,8 @@
 // CreateSession opens a new unlimited session with the remote node.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func CreateSession(prm CreateSessionPrm) (res CreateSessionRes, err error) {
-	res.cliRes, err = prm.cli.SessionCreate(context.Background(), prm.PrmSessionCreate)
+func CreateSession(ctx context.Context, prm CreateSessionPrm) (res CreateSessionRes, err error) {
+	res.cliRes, err = prm.cli.SessionCreate(ctx, prm.PrmSessionCreate)

 	return
 }
@@ -329,15 +329,19 @@
 type PutObjectPrm struct {
 	commonObjectPrm

-	hdr *object.Object
+	copyNum []uint32
+
+	hdr *objectSDK.Object

 	rdr io.Reader

-	headerCallback func(*object.Object)
+	headerCallback func(*objectSDK.Object)
+
+	prepareLocally bool
 }

 // SetHeader sets object header.
-func (x *PutObjectPrm) SetHeader(hdr *object.Object) {
+func (x *PutObjectPrm) SetHeader(hdr *objectSDK.Object) {
 	x.hdr = hdr
 }

@@ -348,10 +352,51 @@ func (x *PutObjectPrm) SetPayloadReader(rdr io.Reader) {

 // SetHeaderCallback sets callback which is called on the object after the header is received
 // but before the payload is written.
-func (x *PutObjectPrm) SetHeaderCallback(f func(*object.Object)) {
+func (x *PutObjectPrm) SetHeaderCallback(f func(*objectSDK.Object)) {
 	x.headerCallback = f
 }

+// SetCopiesNumberByVectors sets ordered list of minimal required object copies numbers
+// per placement vector.
+func (x *PutObjectPrm) SetCopiesNumberByVectors(copiesNumbers []uint32) {
+	x.copyNum = copiesNumbers
+}
+
+// PrepareLocally generate object header on the client side.
+// For big object - split locally too.
+func (x *PutObjectPrm) PrepareLocally() {
+	x.prepareLocally = true
+}
+
+func (x *PutObjectPrm) convertToSDKPrm(ctx context.Context) (client.PrmObjectPutInit, error) {
+	var putPrm client.PrmObjectPutInit
+	if !x.prepareLocally && x.sessionToken != nil {
+		putPrm.WithinSession(*x.sessionToken)
+	}
+
+	if x.bearerToken != nil {
+		putPrm.WithBearerToken(*x.bearerToken)
+	}
+
+	if x.local {
+		putPrm.MarkLocal()
+	}
+
+	putPrm.WithXHeaders(x.xHeaders...)
+	putPrm.SetCopiesNumberByVectors(x.copyNum)
+
+	if x.prepareLocally {
+		res, err := x.cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
+		if err != nil {
+			return client.PrmObjectPutInit{}, err
+		}
+		putPrm.WithObjectMaxSize(res.Info().MaxObjectSize())
+		putPrm.WithEpochSource(epochSource(res.Info().CurrentEpoch()))
+		putPrm.WithoutHomomorphicHash(res.Info().HomomorphicHashingDisabled())
+	}
+	return putPrm, nil
+}
+
 // PutObjectRes groups the resulting values of PutObject operation.
 type PutObjectRes struct {
 	id oid.ID
@@ -362,32 +407,26 @@
 	return x.id
 }

+type epochSource uint64
+
+func (s epochSource) CurrentEpoch() uint64 {
+	return uint64(s)
+}
+
 // PutObject saves the object in FrostFS network.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func PutObject(prm PutObjectPrm) (*PutObjectRes, error) {
-	var putPrm client.PrmObjectPutInit
-
-	if prm.sessionToken != nil {
-		putPrm.WithinSession(*prm.sessionToken)
-	}
-
-	if prm.bearerToken != nil {
-		putPrm.WithBearerToken(*prm.bearerToken)
-	}
-
-	if prm.local {
-		putPrm.MarkLocal()
-	}
-
-	putPrm.WithXHeaders(prm.xHeaders...)
-
-	wrt, err := prm.cli.ObjectPutInit(context.Background(), putPrm)
+func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
+	sdkPrm, err := prm.convertToSDKPrm(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create parameters of object put operation: %w", err)
+	}
+
+	wrt, err := prm.cli.ObjectPutInit(ctx, sdkPrm)
 	if err != nil {
 		return nil, fmt.Errorf("init object writing: %w", err)
 	}

-	if wrt.WriteHeader(*prm.hdr) {
+	if wrt.WriteHeader(ctx, *prm.hdr) {
 		if prm.headerCallback != nil {
 			prm.headerCallback(prm.hdr)
 		}

@@ -417,7 +456,7 @@
 	for {
 		n, err = prm.rdr.Read(buf)
 		if n > 0 {
-			if !wrt.WritePayloadChunk(buf[:n]) {
+			if !wrt.WritePayloadChunk(ctx, buf[:n]) {
 				break
 			}

@@ -433,7 +472,7 @@
 		}
 	}

-	cliRes, err := wrt.Close()
+	cliRes, err := wrt.Close(ctx)
 	if err != nil { // here err already carries both status and client errors
 		return nil, fmt.Errorf("client failure: %w", err)
 	}
@@ -462,7 +501,7 @@
 // DeleteObject marks an object to be removed from FrostFS through tombstone placement.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func DeleteObject(prm DeleteObjectPrm) (*DeleteObjectRes, error) {
+func DeleteObject(ctx context.Context, prm DeleteObjectPrm) (*DeleteObjectRes, error) {
 	var delPrm client.PrmObjectDelete
 	delPrm.FromContainer(prm.objAddr.Container())
 	delPrm.ByID(prm.objAddr.Object())

@@ -477,7 +516,7 @@

 	delPrm.WithXHeaders(prm.xHeaders...)

-	cliRes, err := prm.cli.ObjectDelete(context.Background(), delPrm)
+	cliRes, err := prm.cli.ObjectDelete(ctx, delPrm)
 	if err != nil {
 		return nil, fmt.Errorf("remove object via client: %w", err)
 	}
@@ -493,22 +532,22 @@
 	objectAddressPrm
 	rawPrm
 	payloadWriterPrm
-	headerCallback func(*object.Object)
+	headerCallback func(*objectSDK.Object)
 }

 // SetHeaderCallback sets callback which is called on the object after the header is received
 // but before the payload is written.
-func (p *GetObjectPrm) SetHeaderCallback(f func(*object.Object)) {
+func (p *GetObjectPrm) SetHeaderCallback(f func(*objectSDK.Object)) {
 	p.headerCallback = f
 }

 // GetObjectRes groups the resulting values of GetObject operation.
 type GetObjectRes struct {
-	hdr *object.Object
+	hdr *objectSDK.Object
 }

 // Header returns the header of the request object.
-func (x GetObjectRes) Header() *object.Object {
+func (x GetObjectRes) Header() *objectSDK.Object {
 	return x.hdr
 }

@@ -518,7 +557,7 @@
 //
 // Returns any error which prevented the operation from completing correctly in error return.
 // For raw reading, returns *object.SplitInfoError error if object is virtual.
-func GetObject(prm GetObjectPrm) (*GetObjectRes, error) {
+func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
 	var getPrm client.PrmObjectGet
 	getPrm.FromContainer(prm.objAddr.Container())
 	getPrm.ByID(prm.objAddr.Object())

@@ -541,12 +580,12 @@

 	getPrm.WithXHeaders(prm.xHeaders...)

-	rdr, err := prm.cli.ObjectGetInit(context.Background(), getPrm)
+	rdr, err := prm.cli.ObjectGetInit(ctx, getPrm)
 	if err != nil {
 		return nil, fmt.Errorf("init object reading on client: %w", err)
 	}

-	var hdr object.Object
+	var hdr objectSDK.Object

 	if !rdr.ReadHeader(&hdr) {
 		_, err = rdr.Close()
@@ -582,11 +621,11 @@

 // HeadObjectRes groups the resulting values of HeadObject operation.
 type HeadObjectRes struct {
-	hdr *object.Object
+	hdr *objectSDK.Object
 }

 // Header returns the requested object header.
-func (x HeadObjectRes) Header() *object.Object {
+func (x HeadObjectRes) Header() *objectSDK.Object {
 	return x.hdr
 }

@@ -594,7 +633,7 @@
 //
 // Returns any error which prevented the operation from completing correctly in error return.
 // For raw reading, returns *object.SplitInfoError error if object is virtual.
-func HeadObject(prm HeadObjectPrm) (*HeadObjectRes, error) {
+func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
 	var cliPrm client.PrmObjectHead
 	cliPrm.FromContainer(prm.objAddr.Container())
 	cliPrm.ByID(prm.objAddr.Object())

@@ -617,12 +656,12 @@

 	cliPrm.WithXHeaders(prm.xHeaders...)

-	res, err := prm.cli.ObjectHead(context.Background(), cliPrm)
+	res, err := prm.cli.ObjectHead(ctx, cliPrm)
 	if err != nil {
 		return nil, fmt.Errorf("read object header via client: %w", err)
 	}

-	var hdr object.Object
+	var hdr objectSDK.Object

 	if !res.ReadHeader(&hdr) {
 		return nil, fmt.Errorf("missing header in response")
@@ -638,11 +677,11 @@
 	commonObjectPrm
 	containerIDPrm

-	filters object.SearchFilters
+	filters objectSDK.SearchFilters
 }

 // SetFilters sets search filters.
-func (x *SearchObjectsPrm) SetFilters(filters object.SearchFilters) {
+func (x *SearchObjectsPrm) SetFilters(filters objectSDK.SearchFilters) {
 	x.filters = filters
 }

@@ -659,7 +698,7 @@
 // SearchObjects selects objects from the container which match the filters.
 //
 // Returns any error which prevented the operation from completing correctly in error return.
-func SearchObjects(prm SearchObjectsPrm) (*SearchObjectsRes, error) {
+func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) {
 	var cliPrm client.PrmObjectSearch
 	cliPrm.InContainer(prm.cnrID)
 	cliPrm.SetFilters(prm.filters)

@@ -678,7 +717,7 @@

 	cliPrm.WithXHeaders(prm.xHeaders...)

-	rdr, err := prm.cli.ObjectSearchInit(context.Background(), cliPrm)
+	rdr, err := prm.cli.ObjectSearchInit(ctx, cliPrm)
 	if err != nil {
 		return nil, fmt.Errorf("init object search: %w", err)
 	}
@@ -715,7 +754,7 @@

 	tz bool

-	rngs []*object.Range
+	rngs []*objectSDK.Range

 	salt []byte
 }

@@ -726,7 +765,7 @@
 }

 // SetRanges sets a list of payload ranges to hash.
-func (x *HashPayloadRangesPrm) SetRanges(rngs []*object.Range) {
+func (x *HashPayloadRangesPrm) SetRanges(rngs []*objectSDK.Range) {
 	x.rngs = rngs
 }

@@ -749,7 +788,7 @@
 //
 // Returns any error which prevented the operation from completing correctly in error return.
 // Returns an error if number of received hashes differs with the number of requested ranges.
-func HashPayloadRanges(prm HashPayloadRangesPrm) (*HashPayloadRangesRes, error) {
+func HashPayloadRanges(ctx context.Context, prm HashPayloadRangesPrm) (*HashPayloadRangesRes, error) {
 	var cliPrm client.PrmObjectHash
 	cliPrm.FromContainer(prm.objAddr.Container())
 	cliPrm.ByID(prm.objAddr.Object())

@@ -783,7 +822,7 @@

 	cliPrm.WithXHeaders(prm.xHeaders...)

-	res, err := prm.cli.ObjectHash(context.Background(), cliPrm)
+	res, err := prm.cli.ObjectHash(ctx, cliPrm)
 	if err != nil {
 		return nil, fmt.Errorf("read payload hashes via client: %w", err)
 	}
@@ -800,11 +839,11 @@
 	rawPrm
 	payloadWriterPrm

-	rng *object.Range
+	rng *objectSDK.Range
 }

 // SetRange sets payload range to read.
-func (x *PayloadRangePrm) SetRange(rng *object.Range) {
+func (x *PayloadRangePrm) SetRange(rng *objectSDK.Range) {
 	x.rng = rng
 }

@@ -817,7 +856,7 @@
 //
 // Returns any error which prevented the operation from completing correctly in error return.
 // For raw reading, returns *object.SplitInfoError error if object is virtual.
-func PayloadRange(prm PayloadRangePrm) (*PayloadRangeRes, error) {
+func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
 	var cliPrm client.PrmObjectRange
 	cliPrm.FromContainer(prm.objAddr.Container())
 	cliPrm.ByID(prm.objAddr.Object())

@@ -843,7 +882,7 @@

 	cliPrm.WithXHeaders(prm.xHeaders...)

-	rdr, err := prm.cli.ObjectRangeInit(context.Background(), cliPrm)
+	rdr, err := prm.cli.ObjectRangeInit(ctx, cliPrm)
 	if err != nil {
 		return nil, fmt.Errorf("init payload reading: %w", err)
 	}
@@ -877,12 +916,12 @@
 // Interrupts on any writer error.
 //
 // Panics if a container passed as a parameter is nil.
-func SyncContainerSettings(prm SyncContainerPrm) (*SyncContainerRes, error) {
+func SyncContainerSettings(ctx context.Context, prm SyncContainerPrm) (*SyncContainerRes, error) {
 	if prm.c == nil {
 		panic("sync container settings with the network: nil container")
 	}

-	err := client.SyncContainerWithNetwork(context.Background(), prm.c, prm.cli)
+	err := client.SyncContainerWithNetwork(ctx, prm.c, prm.cli)
 	if err != nil {
 		return nil, err
 	}
@@ -12,9 +12,11 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
+	"google.golang.org/grpc"
 )

 var errInvalidEndpoint = errors.New("provided RPC endpoint is incorrect")

@@ -59,6 +61,9 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey

 		common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
 	}
+	prmDial.SetGRPCDialOptions(
+		grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
+		grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()))

 	c.Init(prmInit)
62
cmd/frostfs-cli/internal/common/tracing.go
Normal file
62
cmd/frostfs-cli/internal/common/tracing.go
Normal file
|
@@ -0,0 +1,62 @@
+package common
+
+import (
+	"context"
+	"sort"
+	"strings"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
+	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+	"github.com/spf13/cobra"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type spanKey struct{}
+
+// StopClientCommandSpan stops tracing span for the command and prints trace ID on the standard output.
+func StopClientCommandSpan(cmd *cobra.Command, _ []string) {
+	span, ok := cmd.Context().Value(spanKey{}).(trace.Span)
+	if !ok {
+		return
+	}
+
+	span.End()
+
+	// Noop provider cannot fail on flush.
+	_ = tracing.Shutdown(cmd.Context())
+
+	cmd.PrintErrf("Trace ID: %s\n", span.SpanContext().TraceID())
+}
+
+// StartClientCommandSpan starts tracing span for the command.
+func StartClientCommandSpan(cmd *cobra.Command) {
+	enableTracing, err := cmd.Flags().GetBool(commonflags.TracingFlag)
+	if err != nil || !enableTracing {
+		return
+	}
+
+	_, err = tracing.Setup(cmd.Context(), tracing.Config{
+		Enabled:  true,
+		Exporter: tracing.NoOpExporter,
+		Service:  "frostfs-cli",
+		Version:  misc.Version,
+	})
+	commonCmd.ExitOnErr(cmd, "init tracing: %w", err)
+
+	var components sort.StringSlice
+	for c := cmd; c != nil; c = c.Parent() {
+		components = append(components, c.Name())
+	}
+	for i, j := 0, len(components)-1; i < j; {
+		components.Swap(i, j)
+		i++
+		j--
+	}
+
+	operation := strings.Join(components, ".")
+	ctx, span := tracing.StartSpanFromContext(cmd.Context(), operation)
+	ctx = context.WithValue(ctx, spanKey{}, span)
+	cmd.SetContext(ctx)
+}

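For orientation, a minimal sketch of how these two helpers are wired into a command tree — the same pattern the accounting, container and netmap root commands adopt in the hunks below; the command name here is illustrative only:

package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"github.com/spf13/cobra"
)

var exampleCmd = &cobra.Command{
	Use: "example",
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		// Starts a span named after the command path, e.g. "frostfs-cli.example",
		// when the tracing flag is set; a no-op otherwise.
		common.StartClientCommandSpan(cmd)
	},
	// Ends the span, flushes the provider and prints "Trace ID: ..." to stderr.
	PersistentPostRun: common.StopClientCommandSpan,
	Run:               func(cmd *cobra.Command, _ []string) {},
}
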
@@ -47,6 +47,9 @@ const (

	OIDFlag      = "oid"
	OIDFlagUsage = "Object ID."
+
+	TracingFlag      = "trace"
+	TracingFlagUsage = "Generate trace ID and print it."
)

// Init adds common flags to the command:
@@ -54,12 +57,14 @@ const (
// - WalletPath,
// - Account,
// - RPC,
+// - Tracing,
// - Timeout.
func Init(cmd *cobra.Command) {
	InitWithoutRPC(cmd)

	ff := cmd.Flags()
	ff.StringP(RPC, RPCShorthand, RPCDefault, RPCUsage)
+	ff.Bool(TracingFlag, false, TracingFlagUsage)
	ff.DurationP(Timeout, TimeoutShorthand, TimeoutDefault, TimeoutUsage)
}

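With TracingFlag registered next to RPC and Timeout, every command that goes through Init gains the option; an illustrative invocation, assuming commonflags.RPC resolves to --rpc-endpoint and the endpoint is a placeholder:

frostfs-cli netmap epoch --rpc-endpoint <host:port> --trace
Trace ID: <hex trace id>

The second line is printed to stderr by StopClientCommandSpan.
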
@@ -41,7 +41,7 @@ var accountingBalanceCmd = &cobra.Command{
		prm.SetClient(cli)
		prm.SetAccount(idUser)

-		res, err := internalclient.BalanceOf(prm)
+		res, err := internalclient.BalanceOf(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		// print to stdout

@@ -1,6 +1,7 @@
package accounting

import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
@@ -17,7 +18,9 @@ var Cmd = &cobra.Command{
		_ = viper.BindPFlag(commonflags.WalletPath, flags.Lookup(commonflags.WalletPath))
		_ = viper.BindPFlag(commonflags.Account, flags.Lookup(commonflags.Account))
		_ = viper.BindPFlag(commonflags.RPC, flags.Lookup(commonflags.RPC))
+		common.StartClientCommandSpan(cmd)
	},
+	PersistentPostRun: common.StopClientCommandSpan,
}

func init() {

@@ -24,6 +24,7 @@ const (
	ownerFlag       = "owner"
	outFlag         = "out"
	jsonFlag        = commonflags.JSON
+	impersonateFlag = "impersonate"
)

var createCmd = &cobra.Command{
@@ -39,19 +40,20 @@ is set to current epoch + n.
}

func init() {
-	createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table")
-	createCmd.Flags().StringP(issuedAtFlag, "i", "", "Epoch to issue token at")
-	createCmd.Flags().StringP(notValidBeforeFlag, "n", "", "Not valid before epoch")
+	createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table (mutually exclusive with --impersonate flag)")
+	createCmd.Flags().StringP(issuedAtFlag, "i", "+0", "Epoch to issue token at")
+	createCmd.Flags().StringP(notValidBeforeFlag, "n", "+0", "Not valid before epoch")
	createCmd.Flags().StringP(commonflags.ExpireAt, "x", "", "The last active epoch for the token")
	createCmd.Flags().StringP(ownerFlag, "o", "", "Token owner")
	createCmd.Flags().String(outFlag, "", "File to write token to")
	createCmd.Flags().Bool(jsonFlag, false, "Output token in JSON")
+	createCmd.Flags().Bool(impersonateFlag, false, "Mark token as impersonate to consider the token signer as the request owner (mutually exclusive with --eacl flag)")
+	createCmd.Flags().StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)

+	createCmd.MarkFlagsMutuallyExclusive(eaclFlag, impersonateFlag)

	_ = cobra.MarkFlagFilename(createCmd.Flags(), eaclFlag)

-	_ = cobra.MarkFlagRequired(createCmd.Flags(), issuedAtFlag)
-	_ = cobra.MarkFlagRequired(createCmd.Flags(), notValidBeforeFlag)
	_ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.ExpireAt)
	_ = cobra.MarkFlagRequired(createCmd.Flags(), ownerFlag)
	_ = cobra.MarkFlagRequired(createCmd.Flags(), outFlag)
@@ -68,10 +70,14 @@ func createToken(cmd *cobra.Command, _ []string) {
	commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)

	if iatRelative || expRelative || nvbRelative {
+		endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
+		if len(endpoint) == 0 {
+			commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
+		}
+
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		defer cancel()

-		endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
		currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
		commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", err)

@@ -101,6 +107,9 @@ func createToken(cmd *cobra.Command, _ []string) {
	b.SetIat(iat)
	b.ForUser(ownerID)

+	impersonate, _ := cmd.Flags().GetBool(impersonateFlag)
+	b.SetImpersonate(impersonate)
+
	eaclPath, _ := cmd.Flags().GetString(eaclFlag)
	if eaclPath != "" {
		table := eaclSDK.NewTable()

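Putting the bearer changes together: with the "+0" defaults, only the expiration still has to be given, and relative epochs need the RPC flag so the current epoch can be resolved. An illustrative invocation (placeholders throughout; flag spellings assume the usual commonflags values):

frostfs-cli bearer create --rpc-endpoint <host:port> --wallet <wallet> \
    --owner <owner-id> --expire-at +100 --out token.json --impersonate

Passing --eacl together with --impersonate is rejected by MarkFlagsMutuallyExclusive.
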
@@ -48,7 +48,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
		var prm internalclient.NetMapSnapshotPrm
		prm.SetClient(cli)

-		resmap, err := internalclient.NetMapSnapshot(prm)
+		resmap, err := internalclient.NetMapSnapshot(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "unable to get netmap snapshot to validate container placement, "+
			"use --force option to skip this check: %w", err)

@@ -96,7 +96,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
		syncContainerPrm.SetClient(cli)
		syncContainerPrm.SetContainer(&cnr)

-		_, err = internalclient.SyncContainerSettings(syncContainerPrm)
+		_, err = internalclient.SyncContainerSettings(cmd.Context(), syncContainerPrm)
		commonCmd.ExitOnErr(cmd, "syncing container's settings rpc error: %w", err)

		var putPrm internalclient.PutContainerPrm
@@ -107,7 +107,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
			putPrm.WithinSession(*tok)
		}

-		res, err := internalclient.PutContainer(putPrm)
+		res, err := internalclient.PutContainer(cmd.Context(), putPrm)
		commonCmd.ExitOnErr(cmd, "put container rpc error: %w", err)

		id := res.ID()
@@ -124,7 +124,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
			for i := 0; i < awaitTimeout; i++ {
				time.Sleep(1 * time.Second)

-				_, err := internalclient.GetContainer(getPrm)
+				_, err := internalclient.GetContainer(cmd.Context(), getPrm)
				if err == nil {
					cmd.Println("container has been persisted on sidechain")
					return
@@ -141,6 +141,7 @@ func initContainerCreateCmd() {

	// Init common flags
	flags.StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)
+	flags.Bool(commonflags.TracingFlag, false, commonflags.TracingFlagUsage)
	flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
	flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
	flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)

@@ -34,7 +34,7 @@ Only owner of the container has a permission to remove container.`,
		getPrm.SetClient(cli)
		getPrm.SetContainer(id)

-		resGet, err := internalclient.GetContainer(getPrm)
+		resGet, err := internalclient.GetContainer(cmd.Context(), getPrm)
		commonCmd.ExitOnErr(cmd, "can't get the container: %w", err)

		owner := resGet.Container().Owner()
@@ -72,7 +72,7 @@ Only owner of the container has a permission to remove container.`,

			common.PrintVerbose(cmd, "Searching for LOCK objects...")

-			res, err := internalclient.SearchObjects(searchPrm)
+			res, err := internalclient.SearchObjects(cmd.Context(), searchPrm)
			commonCmd.ExitOnErr(cmd, "can't search for LOCK objects: %w", err)

			if len(res.IDList()) != 0 {
@@ -91,7 +91,7 @@ Only owner of the container has a permission to remove container.`,
			delPrm.WithinSession(*tok)
		}

-		_, err := internalclient.DeleteContainer(delPrm)
+		_, err := internalclient.DeleteContainer(cmd.Context(), delPrm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		cmd.Println("container delete method invoked")
@@ -106,7 +106,7 @@ Only owner of the container has a permission to remove container.`,
			for i := 0; i < awaitTimeout; i++ {
				time.Sleep(1 * time.Second)

-				_, err := internalclient.GetContainer(getPrm)
+				_, err := internalclient.GetContainer(cmd.Context(), getPrm)
				if err != nil {
					cmd.Println("container has been removed:", containerID)
					return
@@ -124,6 +124,7 @@ func initContainerDeleteCmd() {
	flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
	flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
	flags.StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)
+	flags.Bool(commonflags.TracingFlag, false, commonflags.TracingFlagUsage)

	flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
	flags.BoolVar(&containerAwait, "await", false, "Block execution until container is removed")

@@ -151,7 +151,7 @@ func getContainer(cmd *cobra.Command) (container.Container, *ecdsa.PrivateKey) {
		prm.SetClient(cli)
		prm.SetContainer(id)

-		res, err := internalclient.GetContainer(prm)
+		res, err := internalclient.GetContainer(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		cnr = res.Container()

@@ -24,7 +24,7 @@ var getExtendedACLCmd = &cobra.Command{
		eaclPrm.SetClient(cli)
		eaclPrm.SetContainer(id)

-		res, err := internalclient.EACL(eaclPrm)
+		res, err := internalclient.EACL(cmd.Context(), eaclPrm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		eaclTable := res.EACL()

@@ -49,7 +49,7 @@ var listContainersCmd = &cobra.Command{
		prm.SetClient(cli)
		prm.SetAccount(idUser)

-		res, err := internalclient.ListContainers(prm)
+		res, err := internalclient.ListContainers(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		var prmGet internalclient.GetContainerPrm
@@ -63,7 +63,7 @@ var listContainersCmd = &cobra.Command{
			}

			prmGet.SetContainer(cnrID)
-			res, err := internalclient.GetContainer(prmGet)
+			res, err := internalclient.GetContainer(cmd.Context(), prmGet)
			if err != nil {
				cmd.Printf(" failed to read attributes: %v\n", err)
				continue
@@ -78,7 +78,8 @@ var listContainersCmd = &cobra.Command{
		if flagVarListPrintAttr {
			cnr.IterateAttributes(func(key, val string) {
				if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
-					// FIXME(@cthulhu-rider): neofs-sdk-go#314 use dedicated method to skip system attributes
+					// FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
+					// Use dedicated method to skip system attributes.
					cmd.Printf(" %s: %s\n", key, val)
				}
			})

@@ -9,7 +9,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/spf13/cobra"
)
@@ -31,7 +31,7 @@ var listContainerObjectsCmd = &cobra.Command{
	Run: func(cmd *cobra.Command, args []string) {
		id := parseContainerID(cmd)

-		filters := new(object.SearchFilters)
+		filters := new(objectSDK.SearchFilters)
		filters.AddRootFilter() // search only user created objects

		cli := internalclient.GetSDKClientByFlag(cmd, key.GetOrGenerate(cmd), commonflags.RPC)
@@ -51,7 +51,7 @@ var listContainerObjectsCmd = &cobra.Command{
		prmSearch.SetContainerID(id)
		prmSearch.SetFilters(*filters)

-		res, err := internalclient.SearchObjects(prmSearch)
+		res, err := internalclient.SearchObjects(cmd.Context(), prmSearch)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		objectIDs := res.IDList()
@@ -65,13 +65,14 @@ var listContainerObjectsCmd = &cobra.Command{
			addr.SetObject(objectIDs[i])
			prmHead.SetAddress(addr)

-			resHead, err := internalclient.HeadObject(prmHead)
+			resHead, err := internalclient.HeadObject(cmd.Context(), prmHead)
			if err == nil {
				attrs := resHead.Header().Attributes()
				for i := range attrs {
					attrKey := attrs[i].Key()
					if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, v2object.SysAttributePrefixNeoFS) {
-						// FIXME(@cthulhu-rider): neofs-sdk-go#226 use dedicated method to skip system attributes
+						// FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
+						// Use dedicated method to skip system attributes.
						cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value())
					}
				}

@@ -31,7 +31,7 @@ var containerNodesCmd = &cobra.Command{
		var prm internalclient.NetMapSnapshotPrm
		prm.SetClient(cli)

-		resmap, err := internalclient.NetMapSnapshot(prm)
+		resmap, err := internalclient.NetMapSnapshot(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "unable to get netmap snapshot", err)

		var id cid.ID

233
cmd/frostfs-cli/modules/container/policy_playground.go
Normal file
@@ -0,0 +1,233 @@
+package container
+
+import (
+	"bufio"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
+
+type policyPlaygroundREPL struct {
+	cmd   *cobra.Command
+	args  []string
+	nodes map[string]netmap.NodeInfo
+}
+
+func newPolicyPlaygroundREPL(cmd *cobra.Command, args []string) (*policyPlaygroundREPL, error) {
+	return &policyPlaygroundREPL{
+		cmd:   cmd,
+		args:  args,
+		nodes: map[string]netmap.NodeInfo{},
+	}, nil
+}
+
+func (repl *policyPlaygroundREPL) handleLs(args []string) error {
+	if len(args) > 0 {
+		return fmt.Errorf("too many arguments for command 'ls': got %d, want 0", len(args))
+	}
+	i := 1
+	for id, node := range repl.nodes {
+		var attrs []string
+		node.IterateAttributes(func(k, v string) {
+			attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
+		})
+		fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
+		i++
+	}
+	return nil
+}
+
+func (repl *policyPlaygroundREPL) handleAdd(args []string) error {
+	if len(args) == 0 {
+		return fmt.Errorf("too few arguments for command 'add': got %d, want >0", len(args))
+	}
+	id := args[0]
+	key, err := hex.DecodeString(id)
+	if err != nil {
+		return fmt.Errorf("node id must be a hex string: got %q: %v", id, err)
+	}
+	node := repl.nodes[id]
+	node.SetPublicKey(key)
+	for _, attr := range args[1:] {
+		kv := strings.Split(attr, ":")
+		if len(kv) != 2 {
+			return fmt.Errorf("node attributes must be in the format 'KEY:VALUE': got %q", attr)
+		}
+		node.SetAttribute(kv[0], kv[1])
+	}
+	repl.nodes[id] = node
+	return nil
+}
+
+func (repl *policyPlaygroundREPL) handleLoad(args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("too few arguments for command 'load': got %d, want 1", len(args))
+	}
+
+	jsonNetmap := map[string]map[string]string{}
+
+	b, err := os.ReadFile(args[0])
+	if err != nil {
+		return fmt.Errorf("reading netmap file %q: %v", args[0], err)
+	}
+
+	if err := json.Unmarshal(b, &jsonNetmap); err != nil {
+		return fmt.Errorf("decoding json netmap: %v", err)
+	}
+
+	repl.nodes = make(map[string]netmap.NodeInfo)
+	for id, attrs := range jsonNetmap {
+		key, err := hex.DecodeString(id)
+		if err != nil {
+			return fmt.Errorf("node id must be a hex string: got %q: %v", id, err)
+		}
+
+		node := repl.nodes[id]
+		node.SetPublicKey(key)
+		for k, v := range attrs {
+			node.SetAttribute(k, v)
+		}
+		repl.nodes[id] = node
+	}
+
+	return nil
+}
+
+func (repl *policyPlaygroundREPL) handleRemove(args []string) error {
+	if len(args) == 0 {
+		return fmt.Errorf("too few arguments for command 'remove': got %d, want >0", len(args))
+	}
+	id := args[0]
+	if _, exists := repl.nodes[id]; exists {
+		delete(repl.nodes, id)
+		return nil
+	}
+	return fmt.Errorf("node not found: id=%q", id)
+}
+
+func (repl *policyPlaygroundREPL) handleEval(args []string) error {
+	policyStr := strings.TrimSpace(strings.Join(args, " "))
+	var nodes [][]netmap.NodeInfo
+	nm := repl.netMap()
+
+	if strings.HasPrefix(policyStr, "CBF") || strings.HasPrefix(policyStr, "SELECT") || strings.HasPrefix(policyStr, "FILTER") {
+		// Assume that the input is a partial SELECT-FILTER expression.
+		// Full inline policies always start with UNIQUE or REP keywords,
+		// or different prefixes when it's the case of an external file.
+		sfExpr, err := netmap.DecodeSelectFilterString(policyStr)
+		if err != nil {
+			return fmt.Errorf("parsing select-filter expression: %v", err)
+		}
+		nodes, err = nm.SelectFilterNodes(sfExpr)
+		if err != nil {
+			return fmt.Errorf("building select-filter nodes: %v", err)
+		}
+	} else {
+		// Assume that the input is a full policy or input file otherwise.
+		placementPolicy, err := parseContainerPolicy(repl.cmd, policyStr)
+		if err != nil {
+			return fmt.Errorf("parsing placement policy: %v", err)
+		}
+		nodes, err = nm.ContainerNodes(*placementPolicy, nil)
+		if err != nil {
+			return fmt.Errorf("building container nodes: %v", err)
+		}
+	}
+	for i, ns := range nodes {
+		var ids []string
+		for _, node := range ns {
+			ids = append(ids, hex.EncodeToString(node.PublicKey()))
+		}
+		fmt.Printf("\t%2d: %v\n", i+1, ids)
+	}
+
+	return nil
+}
+
+func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
+	var nm netmap.NetMap
+	var nodes []netmap.NodeInfo
+	for _, node := range repl.nodes {
+		nodes = append(nodes, node)
+	}
+	nm.SetNodes(nodes)
+	return nm
+}
+
+func (repl *policyPlaygroundREPL) run() error {
+	if len(viper.GetString(commonflags.RPC)) > 0 {
+		key := key.GetOrGenerate(repl.cmd)
+		cli := internalclient.GetSDKClientByFlag(repl.cmd, key, commonflags.RPC)
+
+		var prm internalclient.NetMapSnapshotPrm
+		prm.SetClient(cli)
+
+		resp, err := internalclient.NetMapSnapshot(repl.cmd.Context(), prm)
+		commonCmd.ExitOnErr(repl.cmd, "unable to get netmap snapshot to populate initial netmap: %w", err)
+
+		for _, node := range resp.NetMap().Nodes() {
+			id := hex.EncodeToString(node.PublicKey())
+			repl.nodes[id] = node
+		}
+	}
+
+	cmdHandlers := map[string]func([]string) error{
+		"list":   repl.handleLs,
+		"ls":     repl.handleLs,
+		"add":    repl.handleAdd,
+		"load":   repl.handleLoad,
+		"remove": repl.handleRemove,
+		"rm":     repl.handleRemove,
+		"eval":   repl.handleEval,
+	}
+	for reader := bufio.NewReader(os.Stdin); ; {
+		fmt.Print("> ")
+		line, err := reader.ReadString('\n')
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return fmt.Errorf("reading line: %v", err)
+		}
+		parts := strings.Fields(line)
+		if len(parts) == 0 {
+			continue
+		}
+		cmd := parts[0]
+		handler, exists := cmdHandlers[cmd]
+		if exists {
+			if err := handler(parts[1:]); err != nil {
+				fmt.Printf("error: %v\n", err)
+			}
+		} else {
+			fmt.Printf("error: unknown command %q\n", cmd)
+		}
+	}
+}
+
+var policyPlaygroundCmd = &cobra.Command{
+	Use:   "policy-playground",
+	Short: "A REPL for testing placement policies",
+	Long: `A REPL for testing placement policies.
+If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
+	Run: func(cmd *cobra.Command, args []string) {
+		repl, err := newPolicyPlaygroundREPL(cmd, args)
+		commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
+		commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
+	},
+}
+
+func initContainerPolicyPlaygroundCmd() {
+	commonflags.Init(policyPlaygroundCmd)
+}

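A sketch of a playground session the handlers above support (node IDs shortened, output illustrative):

> add 02c1 Country:RU
> add 03f2 Country:NL
> ls
	 1: id=02c1 attrs={Country:"RU"}
	 2: id=03f2 attrs={Country:"NL"}
> eval REP 2
	 1: [02c1 03f2]

handleLoad expects the same data as a JSON object keyed by hex node ID, matching its map[string]map[string]string decoding:

{"02c1": {"Country": "RU"}, "03f2": {"Country": "NL"}}
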
@@ -1,6 +1,7 @@
package container

import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"github.com/spf13/cobra"
)
@@ -15,7 +16,9 @@ var Cmd = &cobra.Command{
		// the viper before execution
		commonflags.Bind(cmd)
		commonflags.BindAPI(cmd)
+		common.StartClientCommandSpan(cmd)
	},
+	PersistentPostRun: common.StopClientCommandSpan,
}

func init() {
@@ -28,6 +31,7 @@ func init() {
		getExtendedACLCmd,
		setExtendedACLCmd,
		containerNodesCmd,
+		policyPlaygroundCmd,
	}

	Cmd.AddCommand(containerChildCommand...)
@@ -40,6 +44,7 @@ func init() {
	initContainerGetEACLCmd()
	initContainerSetEACLCmd()
	initContainerNodesCmd()
+	initContainerPolicyPlaygroundCmd()

	for _, containerCommand := range containerChildCommand {
		commonflags.InitAPI(containerCommand)

@@ -38,7 +38,7 @@ Container ID in EACL table will be substituted with ID from the CLI.`,
		if !flagVarsSetEACL.noPreCheck {
			cmd.Println("Checking the ability to modify access rights in the container...")

-			extendable, err := internalclient.IsACLExtendable(cli, id)
+			extendable, err := internalclient.IsACLExtendable(cmd.Context(), cli, id)
			commonCmd.ExitOnErr(cmd, "Extensibility check failure: %w", err)

			if !extendable {
@@ -56,7 +56,7 @@ Container ID in EACL table will be substituted with ID from the CLI.`,
			setEACLPrm.WithinSession(*tok)
		}

-		_, err := internalclient.SetEACL(setEACLPrm)
+		_, err := internalclient.SetEACL(cmd.Context(), setEACLPrm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		if containerAwait {
@@ -72,7 +72,7 @@ Container ID in EACL table will be substituted with ID from the CLI.`,
			for i := 0; i < awaitTimeout; i++ {
				time.Sleep(1 * time.Second)

-				res, err := internalclient.EACL(getEACLPrm)
+				res, err := internalclient.EACL(cmd.Context(), getEACLPrm)
				if err == nil {
					// compare binary values because EACL could have been set already
					table := res.EACL()

@@ -11,10 +11,11 @@ import (
const ignoreErrorsFlag = "no-errors"

var evacuateShardCmd = &cobra.Command{
-	Use:   "evacuate",
-	Short: "Evacuate objects from shard",
-	Long:  "Evacuate objects from shard to other shards",
-	Run:   evacuateShard,
+	Use:        "evacuate",
+	Short:      "Evacuate objects from shard",
+	Long:       "Evacuate objects from shard to other shards",
+	Run:        evacuateShard,
+	Deprecated: "use frostfs-cli control shards evacuation start",
}

func evacuateShard(cmd *cobra.Command, _ []string) {

315
cmd/frostfs-cli/modules/control/evacuation.go
Normal file
@@ -0,0 +1,315 @@
+package control
+
+import (
+	"crypto/ecdsa"
+	"fmt"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+	clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+	"github.com/spf13/cobra"
+)
+
+const (
+	awaitFlag      = "await"
+	noProgressFlag = "no-progress"
+)
+
+var evacuationShardCmd = &cobra.Command{
+	Use:   "evacuation",
+	Short: "Objects evacuation from shard",
+	Long:  "Objects evacuation from shard to other shards",
+}
+
+var startEvacuationShardCmd = &cobra.Command{
+	Use:   "start",
+	Short: "Start evacuate objects from shard",
+	Long:  "Start evacuate objects from shard to other shards",
+	Run:   startEvacuateShard,
+}
+
+var getEvacuationShardStatusCmd = &cobra.Command{
+	Use:   "status",
+	Short: "Get evacuate objects from shard status",
+	Long:  "Get evacuate objects from shard to other shards status",
+	Run:   getEvacuateShardStatus,
+}
+
+var stopEvacuationShardCmd = &cobra.Command{
+	Use:   "stop",
+	Short: "Stop running evacuate process",
+	Long:  "Stop running evacuate process from shard to other shards",
+	Run:   stopEvacuateShardStatus,
+}
+
+func startEvacuateShard(cmd *cobra.Command, _ []string) {
+	pk := key.Get(cmd)
+
+	ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
+
+	req := &control.StartShardEvacuationRequest{
+		Body: &control.StartShardEvacuationRequest_Body{
+			Shard_ID:     getShardIDList(cmd),
+			IgnoreErrors: ignoreErrors,
+		},
+	}
+
+	signRequest(cmd, pk, req)
+
+	cli := getClient(cmd, pk)
+
+	var resp *control.StartShardEvacuationResponse
+	var err error
+	err = cli.ExecRaw(func(client *client.Client) error {
+		resp, err = control.StartShardEvacuation(client, req)
+		return err
+	})
+	commonCmd.ExitOnErr(cmd, "Start evacuate shards failed, rpc error: %w", err)
+
+	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+	cmd.Println("Shard evacuation has been successfully started.")
+
+	if awaitCompletion, _ := cmd.Flags().GetBool(awaitFlag); awaitCompletion {
+		noProgress, _ := cmd.Flags().GetBool(noProgressFlag)
+		waitEvacuateCompletion(cmd, pk, cli, !noProgress, true)
+	}
+}
+
+func getEvacuateShardStatus(cmd *cobra.Command, _ []string) {
+	pk := key.Get(cmd)
+	req := &control.GetShardEvacuationStatusRequest{
+		Body: &control.GetShardEvacuationStatusRequest_Body{},
+	}
+
+	signRequest(cmd, pk, req)
+
+	cli := getClient(cmd, pk)
+
+	var resp *control.GetShardEvacuationStatusResponse
+	var err error
+	err = cli.ExecRaw(func(client *client.Client) error {
+		resp, err = control.GetShardEvacuationStatus(client, req)
+		return err
+	})
+	commonCmd.ExitOnErr(cmd, "Get evacuate shards status failed, rpc error: %w", err)

+	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+	printStatus(cmd, resp)
+}
+
+func stopEvacuateShardStatus(cmd *cobra.Command, _ []string) {
+	pk := key.Get(cmd)
+	req := &control.StopShardEvacuationRequest{
+		Body: &control.StopShardEvacuationRequest_Body{},
+	}
+
+	signRequest(cmd, pk, req)
+
+	cli := getClient(cmd, pk)
+
+	var resp *control.StopShardEvacuationResponse
+	var err error
+	err = cli.ExecRaw(func(client *client.Client) error {
+		resp, err = control.StopShardEvacuation(client, req)
+		return err
+	})
+	commonCmd.ExitOnErr(cmd, "Stop evacuate shards failed, rpc error: %w", err)
+
+	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+	waitEvacuateCompletion(cmd, pk, cli, false, false)
+
+	cmd.Println("Evacuation stopped.")
+}
+
+func waitEvacuateCompletion(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *clientSDK.Client, printProgress, printCompleted bool) {
+	const statusPollingInterval = 1 * time.Second
+	const reportIntervalSeconds = 5
+	var resp *control.GetShardEvacuationStatusResponse
+	reportResponse := new(atomic.Pointer[control.GetShardEvacuationStatusResponse])
+	pollingCompleted := make(chan struct{})
+	progressReportCompleted := make(chan struct{})
+
+	go func() {
+		defer close(progressReportCompleted)
+		if !printProgress {
+			return
+		}
+		cmd.Printf("Progress will be reported every %d seconds.\n", reportIntervalSeconds)
+		for {
+			select {
+			case <-pollingCompleted:
+				return
+			case <-time.After(reportIntervalSeconds * time.Second):
+				r := reportResponse.Load()
+				if r == nil || r.GetBody().GetStatus() == control.GetShardEvacuationStatusResponse_Body_COMPLETED {
+					continue
+				}
+				printStatus(cmd, r)
+			}
+		}
+	}()
+
+	for {
+		req := &control.GetShardEvacuationStatusRequest{
+			Body: &control.GetShardEvacuationStatusRequest_Body{},
+		}
+		signRequest(cmd, pk, req)
+
+		var err error
+		err = cli.ExecRaw(func(client *client.Client) error {
+			resp, err = control.GetShardEvacuationStatus(client, req)
+			return err
+		})
+
+		reportResponse.Store(resp)
+
+		if err != nil {
+			commonCmd.ExitOnErr(cmd, "Failed to get evacuate status, rpc error: %w", err)
+			return
+		}
+		if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING {
+			break
+		}
+
+		time.Sleep(statusPollingInterval)
+	}
+	close(pollingCompleted)
+	<-progressReportCompleted
+	if printCompleted {
+		printCompletedStatusMessage(cmd, resp)
+	}
+}
+
+func printCompletedStatusMessage(cmd *cobra.Command, resp *control.GetShardEvacuationStatusResponse) {
+	cmd.Println("Shard evacuation has been completed.")
+	sb := &strings.Builder{}
+	appendShardIDs(sb, resp)
+	appendCounts(sb, resp)
+	appendError(sb, resp)
+	appendStartedAt(sb, resp)
+	appendDuration(sb, resp)
+	cmd.Println(sb.String())
+}
+
+func printStatus(cmd *cobra.Command, resp *control.GetShardEvacuationStatusResponse) {
+	if resp.GetBody().GetStatus() == control.GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED {
+		cmd.Println("There is no running or completed evacuation.")
+		return
+	}
+	sb := &strings.Builder{}
+	appendShardIDs(sb, resp)
+	appendStatus(sb, resp)
+	appendCounts(sb, resp)
+	appendError(sb, resp)
+	appendStartedAt(sb, resp)
+	appendDuration(sb, resp)
+	appendEstimation(sb, resp)
+	cmd.Println(sb.String())
+}
+
+func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
+	if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING ||
+		resp.GetBody().GetDuration() == nil ||
+		resp.GetBody().GetTotal() == 0 ||
+		resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed() == 0 {
+		return
+	}
+
+	durationSeconds := float64(resp.GetBody().GetDuration().GetSeconds())
+	evacuated := float64(resp.GetBody().GetEvacuated() + resp.GetBody().GetFailed())
+	avgObjEvacuationTimeSeconds := durationSeconds / evacuated
+	objectsLeft := float64(resp.GetBody().GetTotal()) - evacuated
+	leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
+	leftMinutes := int(leftSeconds / 60)
+
+	sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
+}
+
+func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
+	if resp.GetBody().GetDuration() != nil {
+		duration := time.Second * time.Duration(resp.GetBody().GetDuration().GetSeconds())
+		hour := int(duration.Seconds() / 3600)
+		minute := int(duration.Seconds()/60) % 60
+		second := int(duration.Seconds()) % 60
+		sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
+	}
+}
+
+func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
+	if resp.GetBody().GetStartedAt() != nil {
+		startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
+		sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
+	}
+}
+
+func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
+	if len(resp.Body.GetErrorMessage()) > 0 {
+		sb.WriteString(fmt.Sprintf(" Error: %s.", resp.Body.GetErrorMessage()))
+	}
+}
+
+func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
+	var status string
+	switch resp.GetBody().GetStatus() {
+	case control.GetShardEvacuationStatusResponse_Body_COMPLETED:
+		status = "completed"
+	case control.GetShardEvacuationStatusResponse_Body_RUNNING:
+		status = "running"
+	default:
+		status = "undefined"
+	}
+	sb.WriteString(fmt.Sprintf(" Status: %s.", status))
+}
+
+func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
+	sb.WriteString("Shard IDs: ")
+	for idx, shardID := range resp.GetBody().GetShard_ID() {
+		shardIDStr := shard.NewIDFromBytes(shardID).String()
+		if idx > 0 {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(shardIDStr)
+		if idx == len(resp.GetBody().GetShard_ID())-1 {
+			sb.WriteString(".")
+		}
+	}
+}
+
+func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
+	sb.WriteString(fmt.Sprintf(" Evacuated %d object out of %d, failed to evacuate %d objects.",
+		resp.GetBody().GetEvacuated(),
+		resp.Body.GetTotal(),
+		resp.Body.GetFailed()))
+}
+
+func initControlEvacuationShardCmd() {
+	evacuationShardCmd.AddCommand(startEvacuationShardCmd)
+	evacuationShardCmd.AddCommand(getEvacuationShardStatusCmd)
+	evacuationShardCmd.AddCommand(stopEvacuationShardCmd)
+
+	initControlStartEvacuationShardCmd()
+	initControlFlags(getEvacuationShardStatusCmd)
+	initControlFlags(stopEvacuationShardCmd)
+}
+
+func initControlStartEvacuationShardCmd() {
+	initControlFlags(startEvacuationShardCmd)
+
+	flags := startEvacuationShardCmd.Flags()
+	flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
+	flags.Bool(shardAllFlag, false, "Process all shards")
+	flags.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
+	flags.Bool(awaitFlag, false, "Block execution until evacuation is completed")
+	flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
+
+	startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
+}

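An illustrative walk-through of the new subcommands (endpoint and wallet are placeholders, and the --id/--all spellings assume the shardIDFlag/shardAllFlag constants defined elsewhere in this package):

frostfs-cli control shards evacuation start --endpoint <host:port> --wallet <wallet> --id <shard-id> --await
frostfs-cli control shards evacuation status --endpoint <host:port> --wallet <wallet>
frostfs-cli control shards evacuation stop --endpoint <host:port> --wallet <wallet>

With --await and without --no-progress, waitEvacuateCompletion reprints printStatus every 5 seconds until the status leaves RUNNING.
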
@@ -1,15 +1,10 @@
package control

import (
-	"crypto/ecdsa"
-
	rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
-	ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	"github.com/spf13/cobra"
)

@@ -19,8 +14,8 @@ const (

var healthCheckCmd = &cobra.Command{
	Use:   "healthcheck",
-	Short: "Health check of the FrostFS node",
-	Long:  "Health check of the FrostFS node. Checks storage node by default, use --ir flag to work with Inner Ring.",
+	Short: "Health check for FrostFS storage nodes",
+	Long:  "Health check for FrostFS storage nodes.",
	Run:   healthCheck,
}

@@ -29,18 +24,18 @@ func initControlHealthCheckCmd() {

	flags := healthCheckCmd.Flags()
	flags.Bool(healthcheckIRFlag, false, "Communicate with IR node")
+	_ = flags.MarkDeprecated(healthcheckIRFlag, "for health check of inner ring nodes, use the 'control ir healthcheck' command instead.")
}

-func healthCheck(cmd *cobra.Command, _ []string) {
-	pk := key.Get(cmd)
-
-	cli := getClient(cmd, pk)
-
+func healthCheck(cmd *cobra.Command, args []string) {
	if isIR, _ := cmd.Flags().GetBool(healthcheckIRFlag); isIR {
-		healthCheckIR(cmd, pk, cli)
+		irHealthCheck(cmd, args)
		return
	}

+	pk := key.Get(cmd)
+	cli := getClient(cmd, pk)
+
	req := new(control.HealthCheckRequest)
	req.SetBody(new(control.HealthCheckRequest_Body))

@@ -59,23 +54,3 @@ func healthCheck(cmd *cobra.Command, _ []string) {
	cmd.Printf("Network status: %s\n", resp.GetBody().GetNetmapStatus())
	cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus())
}
-
-func healthCheckIR(cmd *cobra.Command, key *ecdsa.PrivateKey, c *client.Client) {
-	req := new(ircontrol.HealthCheckRequest)
-
-	req.SetBody(new(ircontrol.HealthCheckRequest_Body))
-
-	err := ircontrolsrv.SignMessage(key, req)
-	commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
-
-	var resp *ircontrol.HealthCheckResponse
-	err = c.ExecRaw(func(client *rawclient.Client) error {
-		resp, err = ircontrol.HealthCheck(client, req)
-		return err
-	})
-	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
-	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
-	cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus())
-}

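Both checks are now reachable directly; illustrative invocations (endpoint and wallet are placeholders):

frostfs-cli control healthcheck --endpoint <host:port> --wallet <wallet>
frostfs-cli control ir healthcheck --endpoint <host:port> --wallet <wallet>

The old --ir flag still routes to irHealthCheck, but prints the deprecation notice registered above.
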
@@ -10,6 +10,10 @@ var irCmd = &cobra.Command{

func initControlIRCmd() {
	irCmd.AddCommand(tickEpochCmd)
+	irCmd.AddCommand(removeNodeCmd)
+	irCmd.AddCommand(irHealthCheckCmd)

	initControlIRTickEpochCmd()
+	initControlIRRemoveNodeCmd()
+	initControlIRHealthCheckCmd()
}

44
cmd/frostfs-cli/modules/control/ir_healthcheck.go
Normal file
@@ -0,0 +1,44 @@
+package control
+
+import (
+	rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+	ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
+	"github.com/spf13/cobra"
+)
+
+var irHealthCheckCmd = &cobra.Command{
+	Use:   "healthcheck",
+	Short: "Health check for FrostFS inner ring nodes",
+	Long:  "Health check for FrostFS inner ring nodes.",
+	Run:   irHealthCheck,
+}
+
+func initControlIRHealthCheckCmd() {
+	initControlFlags(irHealthCheckCmd)
+}
+
+func irHealthCheck(cmd *cobra.Command, _ []string) {
+	pk := key.Get(cmd)
+	cli := getClient(cmd, pk)
+
+	req := new(ircontrol.HealthCheckRequest)
+
+	req.SetBody(new(ircontrol.HealthCheckRequest_Body))
+
+	err := ircontrolsrv.SignMessage(pk, req)
+	commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
+
+	var resp *ircontrol.HealthCheckResponse
+	err = cli.ExecRaw(func(client *rawclient.Client) error {
+		resp, err = ircontrol.HealthCheck(client, req)
+		return err
+	})
+	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+	cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus())
+}
58
cmd/frostfs-cli/modules/control/ir_remove_node.go
Normal file
@@ -0,0 +1,58 @@
+package control
+
+import (
+	"encoding/hex"
+	"errors"
+
+	rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+	ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
+	"github.com/spf13/cobra"
+)
+
+var removeNodeCmd = &cobra.Command{
+	Use:   "remove-node",
+	Short: "Forces a node removal from netmap",
+	Long:  "Forces a node removal from netmap via a notary request. It should be executed on other IR nodes as well.",
+	Run:   removeNode,
+}
+
+func initControlIRRemoveNodeCmd() {
+	initControlFlags(removeNodeCmd)
+
+	flags := removeNodeCmd.Flags()
+	flags.String("node", "", "Node public key as a hex string")
+	_ = removeNodeCmd.MarkFlagRequired("node")
+}
+
+func removeNode(cmd *cobra.Command, _ []string) {
+	pk := key.Get(cmd)
+	c := getClient(cmd, pk)
+
+	nodeKeyStr, _ := cmd.Flags().GetString("node")
+	if len(nodeKeyStr) == 0 {
+		commonCmd.ExitOnErr(cmd, "parsing node public key: ", errors.New("key cannot be empty"))
+	}
+	nodeKey, err := hex.DecodeString(nodeKeyStr)
+	commonCmd.ExitOnErr(cmd, "can't decode node public key: %w", err)
+
+	req := new(ircontrol.RemoveNodeRequest)
+	req.SetBody(&ircontrol.RemoveNodeRequest_Body{
+		Key: nodeKey,
+	})
+
+	commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req))
+
+	var resp *ircontrol.RemoveNodeResponse
+	err = c.ExecRaw(func(client *rawclient.Client) error {
+		resp, err = ircontrol.RemoveNode(client, req)
+		return err
+	})
+	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+	cmd.Println("Node removed")
+}

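An illustrative invocation (placeholders throughout; the value of --node must be the hex-encoded public key of the netmap node to drop):

frostfs-cli control ir remove-node --endpoint <host:port> --wallet <wallet> --node <hex-public-key>

As the Long text notes, the same request should also be submitted on the other IR nodes.
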
@@ -13,13 +13,14 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
	shardsCmd.AddCommand(listShardsCmd)
	shardsCmd.AddCommand(setShardModeCmd)
	shardsCmd.AddCommand(evacuateShardCmd)
+	shardsCmd.AddCommand(evacuationShardCmd)
	shardsCmd.AddCommand(flushCacheCmd)
	shardsCmd.AddCommand(doctorCmd)

	initControlShardsListCmd()
	initControlSetShardModeCmd()
	initControlEvacuateShardCmd()
+	initControlEvacuationShardCmd()
	initControlFlushCacheCmd()
	initControlDoctorCmd()
}

@@ -40,7 +40,7 @@ func verifyResponse(cmd *cobra.Command,
		commonCmd.ExitOnErr(cmd, "", errors.New("missing response signature"))
	}

-	// TODO(@cthulhu-rider): #1387 use Signature message from NeoFS API to avoid conversion
+	// TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion
	var sigV2 refs.Signature
	sigV2.SetScheme(refs.ECDSA_SHA512)
	sigV2.SetKey(sigControl.GetKey())

@@ -19,7 +19,7 @@ var getEpochCmd = &cobra.Command{
		var prm internalclient.NetworkInfoPrm
		prm.SetClient(cli)

-		res, err := internalclient.NetworkInfo(prm)
+		res, err := internalclient.NetworkInfo(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		netInfo := res.NetworkInfo()

@@ -23,7 +23,7 @@ var netInfoCmd = &cobra.Command{
		var prm internalclient.NetworkInfoPrm
		prm.SetClient(cli)

-		res, err := internalclient.NetworkInfo(prm)
+		res, err := internalclient.NetworkInfo(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		netInfo := res.NetworkInfo()

@@ -25,7 +25,7 @@ var nodeInfoCmd = &cobra.Command{
		var prm internalclient.NodeInfoPrm
		prm.SetClient(cli)

-		res, err := internalclient.NodeInfo(prm)
+		res, err := internalclient.NodeInfo(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		prettyPrintNodeInfo(cmd, res.NodeInfo())

@@ -1,6 +1,7 @@
package netmap

import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"github.com/spf13/cobra"
)
@@ -14,7 +15,9 @@ var Cmd = &cobra.Command{
		// the viper before execution
		commonflags.Bind(cmd)
		commonflags.BindAPI(cmd)
+		common.StartClientCommandSpan(cmd)
	},
+	PersistentPostRun: common.StopClientCommandSpan,
}

func init() {

@@ -19,7 +19,7 @@ var snapshotCmd = &cobra.Command{
		var prm internalclient.NetMapSnapshotPrm
		prm.SetClient(cli)

-		res, err := internalclient.NetMapSnapshot(prm)
+		res, err := internalclient.NetMapSnapshot(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

		commonCmd.PrettyPrintNetMap(cmd, res.NetMap(), false)

@@ -65,7 +65,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
	Prepare(cmd, &prm)
	prm.SetAddress(objAddr)

-	res, err := internalclient.DeleteObject(prm)
+	res, err := internalclient.DeleteObject(cmd.Context(), prm)
	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

	tomb := res.Tombstone()

@@ -11,7 +11,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/cheggaaa/pb"
	"github.com/spf13/cobra"
@@ -84,13 +84,13 @@ func getObject(cmd *cobra.Command, _ []string) {
		p = pb.New64(0)
		p.Output = cmd.OutOrStdout()
		prm.SetPayloadWriter(p.NewProxyWriter(payloadWriter))
-		prm.SetHeaderCallback(func(o *object.Object) {
+		prm.SetHeaderCallback(func(o *objectSDK.Object) {
			p.SetTotal64(int64(o.PayloadSize()))
			p.Start()
		})
	}

-	res, err := internalclient.GetObject(prm)
+	res, err := internalclient.GetObject(cmd.Context(), prm)
	if p != nil {
		p.Finish()
	}

@@ -75,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
	headPrm.SetAddress(objAddr)

	// get hash of full payload through HEAD (may be user can do it through dedicated command?)
-	res, err := internalclient.HeadObject(headPrm)
+	res, err := internalclient.HeadObject(cmd.Context(), headPrm)
	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

	var cs checksum.Checksum
@@ -108,7 +108,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
		hashPrm.TZ()
	}

-	res, err := internalclient.HashPayloadRanges(hashPrm)
+	res, err := internalclient.HashPayloadRanges(cmd.Context(), hashPrm)
	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

	hs := res.HashList()

@@ -13,7 +13,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/spf13/cobra"
)
@@ -64,7 +64,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
	prm.SetAddress(objAddr)
	prm.SetMainOnlyFlag(mainOnly)

-	res, err := internalclient.HeadObject(prm)
+	res, err := internalclient.HeadObject(cmd.Context(), prm)
	if err != nil {
		if ok := printSplitInfoErr(cmd, err); ok {
			return
@@ -77,7 +77,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
	commonCmd.ExitOnErr(cmd, "", err)
}

-func saveAndPrintHeader(cmd *cobra.Command, obj *object.Object, filename string) error {
+func saveAndPrintHeader(cmd *cobra.Command, obj *objectSDK.Object, filename string) error {
	bs, err := marshalHeader(cmd, obj)
	if err != nil {
		return fmt.Errorf("could not marshal header: %w", err)
@@ -97,7 +97,7 @@ func saveAndPrintHeader(cmd *cobra.Command, obj *object.Object, filename string)
	return printHeader(cmd, obj)
}

-func marshalHeader(cmd *cobra.Command, hdr *object.Object) ([]byte, error) {
+func marshalHeader(cmd *cobra.Command, hdr *objectSDK.Object) ([]byte, error) {
	toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
	toProto, _ := cmd.Flags().GetBool("proto")
	switch {
@@ -138,7 +138,7 @@ func printContainerID(cmd *cobra.Command, recv func() (cid.ID, bool)) {
	cmd.Printf("CID: %s\n", strID)
}

-func printHeader(cmd *cobra.Command, obj *object.Object) error {
+func printHeader(cmd *cobra.Command, obj *objectSDK.Object) error {
	printObjectID(cmd, obj.ID)
	printContainerID(cmd, obj.ContainerID)
	cmd.Printf("Owner: %s\n", obj.OwnerID())
@@ -150,7 +150,7 @@ func printHeader(cmd *cobra.Command, obj *object.Object) error {

	cmd.Println("Attributes:")
	for _, attr := range obj.Attributes() {
-		if attr.Key() == object.AttributeTimestamp {
+		if attr.Key() == objectSDK.AttributeTimestamp {
			cmd.Printf(" %s=%s (%s)\n",
				attr.Key(),
				attr.Value(),
@@ -163,7 +163,7 @@ func printHeader(cmd *cobra.Command, obj *object.Object) error {
	if signature := obj.Signature(); signature != nil {
		cmd.Print("ID signature:\n")

-		// TODO(@carpawell): #1387 implement and use another approach to avoid conversion
+		// TODO(@carpawell): #468 implement and use another approach to avoid conversion
		var sigV2 refs.Signature
		signature.WriteToV2(&sigV2)

@@ -174,7 +174,7 @@ func printHeader(cmd *cobra.Command, obj *object.Object) error {
	return printSplitHeader(cmd, obj)
}

-func printSplitHeader(cmd *cobra.Command, obj *object.Object) error {
+func printSplitHeader(cmd *cobra.Command, obj *objectSDK.Object) error {
	if splitID := obj.SplitID(); splitID != nil {
		cmd.Printf("Split ID: %s\n", splitID)
	}

@@ -104,7 +104,7 @@ var objectLockCmd = &cobra.Command{
		Prepare(cmd, &prm)
		prm.SetHeader(obj)

-		res, err := internalclient.PutObject(prm)
+		res, err := internalclient.PutObject(cmd.Context(), prm)
		commonCmd.ExitOnErr(cmd, "Store lock object in FrostFS: %w", err)

		cmd.Printf("Lock object ID: %s\n", res.ID())

348
cmd/frostfs-cli/modules/object/nodes.go
Normal file
@@ -0,0 +1,348 @@
+package object
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strconv"
+	"sync"
+	"text/tabwriter"
+
+	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/spf13/cobra"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	verifyPresenceAllFlag = "verify-presence-all"
+)
+
+type objectNodesInfo struct {
+	containerID      cid.ID
+	objectID         oid.ID
+	relatedObjectIDs []oid.ID
+	isLock           bool
+}
+
+type boolError struct {
+	value bool
+	err   error
+}
+
+var objectNodesCmd = &cobra.Command{
+	Use:   "nodes",
+	Short: "List of nodes where the object is stored",
+	Long: `List of nodes where the object should be stored and where it is actually stored.
+Lock objects must exist on all nodes of the container.
+For complex objects, a node is considered to store an object if the node stores at least one part of the complex object.
+By default, the actual storage of the object is checked only on the nodes that should store the object. To check all nodes, use the flag --verify-presence-all.`,
+	Run: objectNodes,
+}
+
+func initObjectNodesCmd() {
+	commonflags.Init(objectNodesCmd)
+
+	flags := objectNodesCmd.Flags()
+
+	flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
+	_ = objectGetCmd.MarkFlagRequired(commonflags.CIDFlag)
+
+	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
+	_ = objectGetCmd.MarkFlagRequired(commonflags.OIDFlag)
+
+	flags.Bool("verify-presence-all", false, "Verify the actual presence of the object on all netmap nodes")
+}
+
+func objectNodes(cmd *cobra.Command, _ []string) {
+	var cnrID cid.ID
+	var objID oid.ID
+	readObjectAddress(cmd, &cnrID, &objID)
+
+	pk := key.GetOrGenerate(cmd)
+	cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
+
+	objectInfo := getObjectInfo(cmd, cnrID, objID, cli, pk)
+
+	placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
+
+	requiredPlacement := getRequiredPlacement(cmd, objectInfo, placementPolicy, netmap)
+
+	actualPlacement := getActualPlacement(cmd, netmap, requiredPlacement, pk, objectInfo)
+
+	printPlacement(cmd, netmap, requiredPlacement, actualPlacement)
+}
+
+func getObjectInfo(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) *objectNodesInfo {
+	var addrObj oid.Address
+	addrObj.SetContainer(cnrID)
+	addrObj.SetObject(objID)
+
+	var prmHead internalclient.HeadObjectPrm
+	prmHead.SetClient(cli)
+	prmHead.SetAddress(addrObj)
+	prmHead.SetRawFlag(true)
+
+	Prepare(cmd, &prmHead)
+	readSession(cmd, &prmHead, pk, cnrID, objID)
+
+	res, err := internalclient.HeadObject(cmd.Context(), prmHead)
+	if err == nil {
+		return &objectNodesInfo{
+			containerID: cnrID,
+			objectID:    objID,
+			isLock:      res.Header().Type() == objectSDK.TypeLock,
+		}
+	}
+
+	var errSplitInfo *objectSDK.SplitInfoError
+
+	if !errors.As(err, &errSplitInfo) {
+		commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
+		return nil
+	}
+
+	splitInfo := errSplitInfo.SplitInfo()
+
+	if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
+		return &objectNodesInfo{
+			containerID:      cnrID,
+			objectID:         objID,
+			relatedObjectIDs: members,
+		}
+	}
+
+	if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
+		return &objectNodesInfo{
+			containerID:      cnrID,
+			objectID:         objID,
+			relatedObjectIDs: members,
+		}
+	}
+
+	members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
+	return &objectNodesInfo{
+		containerID:      cnrID,
+		objectID:         objID,
+		relatedObjectIDs: members,
+	}
+}
+
+func getPlacementPolicyAndNetmap(cmd *cobra.Command, cnrID cid.ID, cli *client.Client) (placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) {
+	eg, egCtx := errgroup.WithContext(cmd.Context())
+	eg.Go(func() (e error) {
+		placementPolicy, e = getPlacementPolicy(egCtx, cnrID, cli)
+		return
+	})
+	eg.Go(func() (e error) {
+		netmap, e = getNetMap(egCtx, cli)
+		return
+	})
+	commonCmd.ExitOnErr(cmd, "rpc error: %w", eg.Wait())
+	return
+}
+
+func getPlacementPolicy(ctx context.Context, cnrID cid.ID, cli *client.Client) (netmapSDK.PlacementPolicy, error) {
+	var prm internalclient.GetContainerPrm
+	prm.SetClient(cli)
+	prm.SetContainer(cnrID)
+
+	res, err := internalclient.GetContainer(ctx, prm)
+	if err != nil {
+		return netmapSDK.PlacementPolicy{}, err
+	}
+
+	return res.Container().PlacementPolicy(), nil
+}
+
+func getNetMap(ctx context.Context, cli *client.Client) (*netmapSDK.NetMap, error) {
+	var prm internalclient.NetMapSnapshotPrm
+	prm.SetClient(cli)
+
+	res, err := internalclient.NetMapSnapshot(ctx, prm)
+	if err != nil {
+		return nil, err
+	}
+	nm := res.NetMap()
+	return &nm, nil
+}
+
+func getRequiredPlacement(cmd *cobra.Command, objInfo *objectNodesInfo, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) map[uint64]netmapSDK.NodeInfo {
+	nodes := make(map[uint64]netmapSDK.NodeInfo)
+	placementBuilder := placement.NewNetworkMapBuilder(netmap)
|
||||
placement, err := placementBuilder.BuildPlacement(objInfo.containerID, &objInfo.objectID, placementPolicy)
|
||||
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
|
||||
for repIdx, rep := range placement {
|
||||
numOfReplicas := placementPolicy.ReplicaNumberByIndex(repIdx)
|
||||
var nodeIdx uint32
|
||||
for _, n := range rep {
|
||||
if !objInfo.isLock && nodeIdx == numOfReplicas { //lock object should be on all container nodes
|
||||
break
|
||||
}
|
||||
nodes[n.Hash()] = n
|
||||
nodeIdx++
|
||||
}
|
||||
}
|
||||
|
||||
for _, relatedObjID := range objInfo.relatedObjectIDs {
|
||||
placement, err = placementBuilder.BuildPlacement(objInfo.containerID, &relatedObjID, placementPolicy)
|
||||
commonCmd.ExitOnErr(cmd, "failed to get required placement for related object: %w", err)
|
||||
for _, rep := range placement {
|
||||
for _, n := range rep {
|
||||
nodes[n.Hash()] = n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
|
||||
|
||||
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo,
|
||||
pk *ecdsa.PrivateKey, objInfo *objectNodesInfo) map[uint64]boolError {
|
||||
result := make(map[uint64]boolError)
|
||||
resultMtx := &sync.Mutex{}
|
||||
|
||||
var candidates []netmapSDK.NodeInfo
|
||||
checkAllNodes, _ := cmd.Flags().GetBool(verifyPresenceAllFlag)
|
||||
if checkAllNodes {
|
||||
candidates = netmap.Nodes()
|
||||
} else {
|
||||
for _, n := range requiredPlacement {
|
||||
candidates = append(candidates, n)
|
||||
}
|
||||
}
|
||||
|
||||
eg, egCtx := errgroup.WithContext(cmd.Context())
|
||||
for _, cand := range candidates {
|
||||
cand := cand
|
||||
|
||||
eg.Go(func() error {
|
||||
cli, err := createClient(egCtx, cmd, cand, pk)
|
||||
if err != nil {
|
||||
resultMtx.Lock()
|
||||
defer resultMtx.Unlock()
|
||||
result[cand.Hash()] = boolError{err: err}
|
||||
return nil
|
||||
}
|
||||
|
||||
eg.Go(func() error {
|
||||
var v boolError
|
||||
v.value, v.err = isObjectStoredOnNode(egCtx, cmd, objInfo.containerID, objInfo.objectID, cli, pk)
|
||||
resultMtx.Lock()
|
||||
defer resultMtx.Unlock()
|
||||
if prev, exists := result[cand.Hash()]; exists && (prev.err != nil || prev.value) {
|
||||
return nil
|
||||
}
|
||||
result[cand.Hash()] = v
|
||||
return nil
|
||||
})
|
||||
|
||||
for _, rObjID := range objInfo.relatedObjectIDs {
|
||||
rObjID := rObjID
|
||||
eg.Go(func() error {
|
||||
var v boolError
|
||||
v.value, v.err = isObjectStoredOnNode(egCtx, cmd, objInfo.containerID, rObjID, cli, pk)
|
||||
resultMtx.Lock()
|
||||
defer resultMtx.Unlock()
|
||||
if prev, exists := result[cand.Hash()]; exists && (prev.err != nil || prev.value) {
|
||||
return nil
|
||||
}
|
||||
result[cand.Hash()] = v
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
|
||||
return result
|
||||
}
|
||||
|
||||
func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) {
|
||||
var cli *client.Client
|
||||
var addresses []string
|
||||
candidate.IterateNetworkEndpoints(func(s string) bool {
|
||||
addresses = append(addresses, s)
|
||||
return false
|
||||
})
|
||||
addresses = append(addresses, candidate.ExternalAddresses()...)
|
||||
var lastErr error
|
||||
for _, address := range addresses {
|
||||
var networkAddr network.Address
|
||||
lastErr = networkAddr.FromString(address)
|
||||
if lastErr != nil {
|
||||
continue
|
||||
}
|
||||
cli, lastErr = internalclient.GetSDKClient(ctx, cmd, pk, networkAddr)
|
||||
if lastErr == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if lastErr != nil {
|
||||
return nil, lastErr
|
||||
}
|
||||
if cli == nil {
|
||||
return nil, fmt.Errorf("failed to create client: no available endpoint")
|
||||
}
|
||||
return cli, nil
|
||||
}
|
||||
|
||||
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
|
||||
var addrObj oid.Address
|
||||
addrObj.SetContainer(cnrID)
|
||||
addrObj.SetObject(objID)
|
||||
|
||||
var prmHead internalclient.HeadObjectPrm
|
||||
prmHead.SetClient(cli)
|
||||
prmHead.SetAddress(addrObj)
|
||||
|
||||
Prepare(cmd, &prmHead)
|
||||
prmHead.SetTTL(1)
|
||||
readSession(cmd, &prmHead, pk, cnrID, objID)
|
||||
|
||||
res, err := internalclient.HeadObject(ctx, prmHead)
|
||||
if err == nil && res != nil {
|
||||
return true, nil
|
||||
}
|
||||
var notFound *apistatus.ObjectNotFound
|
||||
var removed *apistatus.ObjectAlreadyRemoved
|
||||
if errors.As(err, ¬Found) || errors.As(err, &removed) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func printPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo, actualPlacement map[uint64]boolError) {
|
||||
w := tabwriter.NewWriter(cmd.OutOrStdout(), 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)
|
||||
defer func() {
|
||||
commonCmd.ExitOnErr(cmd, "failed to print placement info: %w", w.Flush())
|
||||
}()
|
||||
fmt.Fprintln(w, "Node ID\tShould contain object\tActually contains object\t")
|
||||
for _, n := range netmap.Nodes() {
|
||||
nodeID := hex.EncodeToString(n.PublicKey())
|
||||
_, required := requiredPlacement[n.Hash()]
|
||||
actual, actualExists := actualPlacement[n.Hash()]
|
||||
actualStr := ""
|
||||
if actualExists {
|
||||
if actual.err != nil {
|
||||
actualStr = fmt.Sprintf("error: %v", actual.err)
|
||||
} else {
|
||||
actualStr = strconv.FormatBool(actual.value)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t\n", nodeID, strconv.FormatBool(required), actualStr)
|
||||
}
|
||||
}
|
|
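The presence check in getActualPlacement above fans out one errgroup goroutine per candidate node and merges per-node results into a shared map under a mutex; a node's failure is recorded, never fatal to the whole group. A minimal, self-contained sketch of that pattern follows; the presence type and checkFn are illustrative stand-ins, not frostfs-node APIs.

package main

import (
    "context"
    "fmt"
    "sync"

    "golang.org/x/sync/errgroup"
)

type presence struct {
    value bool
    err   error
}

func checkAll(ctx context.Context, nodes []uint64, checkFn func(context.Context, uint64) (bool, error)) map[uint64]presence {
    result := make(map[uint64]presence)
    var mtx sync.Mutex

    eg, egCtx := errgroup.WithContext(ctx)
    for _, n := range nodes {
        n := n // capture loop variable (pre-Go 1.22 semantics)
        eg.Go(func() error {
            var p presence
            p.value, p.err = checkFn(egCtx, n)
            mtx.Lock()
            defer mtx.Unlock()
            // keep an already-recorded positive or failed result
            if prev, ok := result[n]; ok && (prev.err != nil || prev.value) {
                return nil
            }
            result[n] = p
            return nil // per-node errors are recorded in the map, not returned
        })
    }
    _ = eg.Wait() // the goroutines above never return a non-nil error
    return result
}

func main() {
    got := checkAll(context.Background(), []uint64{1, 2, 3}, func(_ context.Context, n uint64) (bool, error) {
        return n%2 == 1, nil // pretend odd-numbered nodes store the object
    })
    fmt.Println(got)
}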
@@ -16,15 +16,17 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/cheggaaa/pb"
 	"github.com/spf13/cobra"
 )

 const (
-	noProgressFlag   = "no-progress"
-	notificationFlag = "notify"
+	noProgressFlag     = "no-progress"
+	notificationFlag   = "notify"
+	copiesNumberFlag   = "copies-number"
+	prepareLocallyFlag = "prepare-locally"
 )

 var putExpiredOn uint64

@@ -53,9 +55,12 @@ func initObjectPutCmd() {
 	flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
 	flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
 	flags.Bool(noProgressFlag, false, "Do not show progress bar")
+	flags.Bool(prepareLocallyFlag, false, "Generate object header on the client side (for big object - split locally too)")

 	flags.String(notificationFlag, "", "Object notification in the form of *epoch*:*topic*; '-' topic means using default")
 	flags.Bool(binaryFlag, false, "Deserialize object structure from given file.")
+
+	flags.String(copiesNumberFlag, "", "Number of copies of the object to store within the RPC call")
 }

 func putObject(cmd *cobra.Command, _ []string) {

@@ -76,7 +81,7 @@ func putObject(cmd *cobra.Command, _ []string) {
 		commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err))
 	}
 	var payloadReader io.Reader = f
-	obj := object.New()
+	obj := objectSDK.New()

 	if binary {
 		payloadReader, cnr, ownerID = readFilePayload(filename, cmd)

@@ -99,7 +104,12 @@ func putObject(cmd *cobra.Command, _ []string) {
 	}

 	var prm internalclient.PutObjectPrm
-	ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
+	if prepareLocally, _ := cmd.Flags().GetBool(prepareLocallyFlag); prepareLocally {
+		prm.SetClient(internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC))
+		prm.PrepareLocally()
+	} else {
+		ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
+	}
 	Prepare(cmd, &prm)
 	prm.SetHeader(obj)

@@ -116,7 +126,11 @@ func putObject(cmd *cobra.Command, _ []string) {
 		}
 	}

-	res, err := internalclient.PutObject(prm)
+	copyNum, err := cmd.Flags().GetString(copiesNumberFlag)
+	commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err)
+	prm.SetCopiesNumberByVectors(parseCopyNumber(cmd, copyNum))
+
+	res, err := internalclient.PutObject(cmd.Context(), prm)
 	if p != nil {
 		p.Finish()
 	}

@@ -126,10 +140,22 @@ func putObject(cmd *cobra.Command, _ []string) {
 	cmd.Printf(" OID: %s\n CID: %s\n", res.ID(), cnr)
 }

+func parseCopyNumber(cmd *cobra.Command, copyNum string) []uint32 {
+	var cn []uint32
+	if len(copyNum) > 0 {
+		for _, num := range strings.Split(copyNum, ",") {
+			val, err := strconv.ParseUint(num, 10, 32)
+			commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err)
+			cn = append(cn, uint32(val))
+		}
+	}
+	return cn
+}
+
 func readFilePayload(filename string, cmd *cobra.Command) (io.Reader, cid.ID, user.ID) {
 	buf, err := os.ReadFile(filename)
 	commonCmd.ExitOnErr(cmd, "unable to read given file: %w", err)
-	objTemp := object.New()
+	objTemp := objectSDK.New()
 	// TODO(@acid-ant): #1932 Use streams to marshal/unmarshal payload
 	commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf))
 	payloadReader := bytes.NewReader(objTemp.Payload())

@@ -148,19 +174,19 @@ func setFilePayloadReader(cmd *cobra.Command, f *os.File, prm *internalclient.PutObjectPrm) *pb.ProgressBar {
 	p := pb.New64(fi.Size())
 	p.Output = cmd.OutOrStdout()
 	prm.SetPayloadReader(p.NewProxyReader(f))
-	prm.SetHeaderCallback(func(o *object.Object) { p.Start() })
+	prm.SetHeaderCallback(func(o *objectSDK.Object) { p.Start() })
 	return p
 }

-func setBinaryPayloadReader(cmd *cobra.Command, obj *object.Object, prm *internalclient.PutObjectPrm, payloadReader io.Reader) *pb.ProgressBar {
+func setBinaryPayloadReader(cmd *cobra.Command, obj *objectSDK.Object, prm *internalclient.PutObjectPrm, payloadReader io.Reader) *pb.ProgressBar {
 	p := pb.New(len(obj.Payload()))
 	p.Output = cmd.OutOrStdout()
 	prm.SetPayloadReader(p.NewProxyReader(payloadReader))
-	prm.SetHeaderCallback(func(o *object.Object) { p.Start() })
+	prm.SetHeaderCallback(func(o *objectSDK.Object) { p.Start() })
 	return p
 }

-func getAllObjectAttributes(cmd *cobra.Command) []object.Attribute {
+func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
 	attrs, err := parseObjectAttrs(cmd)
 	commonCmd.ExitOnErr(cmd, "can't parse object attributes: %w", err)

@@ -179,7 +205,7 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {

 	if !expAttrFound {
 		index := len(attrs)
-		attrs = append(attrs, object.Attribute{})
+		attrs = append(attrs, objectSDK.Attribute{})
 		attrs[index].SetKey(objectV2.SysAttributeExpEpoch)
 		attrs[index].SetValue(expAttrValue)
 	}

@@ -187,7 +213,7 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
 	return attrs
 }

-func parseObjectAttrs(cmd *cobra.Command) ([]object.Attribute, error) {
+func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
 	var rawAttrs []string

 	raw := cmd.Flag("attributes").Value.String()

@@ -195,7 +221,7 @@ func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
 		rawAttrs = strings.Split(raw, ",")
 	}

-	attrs := make([]object.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
+	attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
 	for i := range rawAttrs {
 		k, v, found := strings.Cut(rawAttrs[i], "=")
 		if !found {

@@ -209,23 +235,23 @@ func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
 	if !disableFilename {
 		filename := filepath.Base(cmd.Flag(fileFlag).Value.String())
 		index := len(attrs)
-		attrs = append(attrs, object.Attribute{})
-		attrs[index].SetKey(object.AttributeFileName)
+		attrs = append(attrs, objectSDK.Attribute{})
+		attrs[index].SetKey(objectSDK.AttributeFileName)
 		attrs[index].SetValue(filename)
 	}

 	disableTime, _ := cmd.Flags().GetBool("disable-timestamp")
 	if !disableTime {
 		index := len(attrs)
-		attrs = append(attrs, object.Attribute{})
-		attrs[index].SetKey(object.AttributeTimestamp)
+		attrs = append(attrs, objectSDK.Attribute{})
+		attrs[index].SetKey(objectSDK.AttributeTimestamp)
 		attrs[index].SetValue(strconv.FormatInt(time.Now().Unix(), 10))
 	}

 	return attrs, nil
 }

-func parseObjectNotifications(cmd *cobra.Command) (*object.NotificationInfo, error) {
+func parseObjectNotifications(cmd *cobra.Command) (*objectSDK.NotificationInfo, error) {
 	const (
 		separator       = ":"
 		useDefaultTopic = "-"

@@ -241,7 +267,7 @@ func parseObjectNotifications(cmd *cobra.Command) (*objectSDK.NotificationInfo, error) {
 		return nil, fmt.Errorf("notification must be in the form of: *epoch*%s*topic*, got %s", separator, raw)
 	}

-	ni := new(object.NotificationInfo)
+	ni := new(objectSDK.NotificationInfo)

 	epoch, err := strconv.ParseUint(before, 10, 64)
 	if err != nil {
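parseCopyNumber above turns the comma-separated --copies-number value into one uint32 per replica vector of the placement policy. A standalone sketch of the same parsing rule, returning an error instead of using the CLI's exit-on-error helper; the function name here is illustrative.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseCopies mirrors parseCopyNumber: empty input means "no override",
// otherwise each comma-separated field becomes one per-vector copy count.
func parseCopies(s string) ([]uint32, error) {
    if s == "" {
        return nil, nil
    }
    var cn []uint32
    for _, num := range strings.Split(s, ",") {
        val, err := strconv.ParseUint(num, 10, 32)
        if err != nil {
            return nil, err
        }
        cn = append(cn, uint32(val))
    }
    return cn, nil
}

func main() {
    cn, err := parseCopies("2,3,0")
    fmt.Println(cn, err) // [2 3 0] <nil>: one value per replica vector
}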
@@ -14,7 +14,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/spf13/cobra"
 )

@@ -87,7 +87,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
 	prm.SetRange(ranges[0])
 	prm.SetPayloadWriter(out)

-	_, err = internalclient.PayloadRange(prm)
+	_, err = internalclient.PayloadRange(cmd.Context(), prm)
 	if err != nil {
 		if ok := printSplitInfoErr(cmd, err); ok {
 			return

@@ -102,7 +102,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
 }

 func printSplitInfoErr(cmd *cobra.Command, err error) bool {
-	var errSplitInfo *object.SplitInfoError
+	var errSplitInfo *objectSDK.SplitInfoError

 	ok := errors.As(err, &errSplitInfo)

@@ -114,14 +114,14 @@ func printSplitInfoErr(cmd *cobra.Command, err error) bool {
 	return ok
 }

-func printSplitInfo(cmd *cobra.Command, info *object.SplitInfo) {
+func printSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) {
 	bs, err := marshalSplitInfo(cmd, info)
 	commonCmd.ExitOnErr(cmd, "can't marshal split info: %w", err)

 	cmd.Println(string(bs))
 }

-func marshalSplitInfo(cmd *cobra.Command, info *object.SplitInfo) ([]byte, error) {
+func marshalSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) ([]byte, error) {
 	toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
 	toProto, _ := cmd.Flags().GetBool("proto")
 	switch {

@@ -146,13 +146,13 @@ func marshalSplitInfo(cmd *cobra.Command, info *object.SplitInfo) ([]byte, error
 	}
 }

-func getRangeList(cmd *cobra.Command) ([]*object.Range, error) {
+func getRangeList(cmd *cobra.Command) ([]*objectSDK.Range, error) {
 	v := cmd.Flag("range").Value.String()
 	if len(v) == 0 {
 		return nil, nil
 	}
 	vs := strings.Split(v, ",")
-	rs := make([]*object.Range, len(vs))
+	rs := make([]*objectSDK.Range, len(vs))
 	for i := range vs {
 		before, after, found := strings.Cut(vs[i], rangeSep)
 		if !found {

@@ -176,7 +176,7 @@ func getRangeList(cmd *cobra.Command) ([]*object.Range, error) {
 			return nil, fmt.Errorf("invalid '%s' range: uint64 overflow", vs[i])
 		}

-		rs[i] = object.NewRange()
+		rs[i] = objectSDK.NewRange()
 		rs[i].SetOffset(offset)
 		rs[i].SetLength(length)
 	}
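getRangeList accepts comma-separated offset:length pairs and rejects any range whose end would overflow uint64. A self-contained sketch of one pair's parsing, assuming rangeSep is ":" (the separator constant itself is defined outside the shown hunks):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseRange reproduces the per-item logic of getRangeList: split on ":",
// parse both halves as uint64, and reject offset+length wrap-around.
func parseRange(s string) (offset, length uint64, err error) {
    before, after, found := strings.Cut(s, ":")
    if !found {
        return 0, 0, fmt.Errorf("invalid range specifier: %s", s)
    }
    if offset, err = strconv.ParseUint(before, 10, 64); err != nil {
        return 0, 0, err
    }
    if length, err = strconv.ParseUint(after, 10, 64); err != nil {
        return 0, 0, err
    }
    if offset+length < offset { // uint64 overflow
        return 0, 0, fmt.Errorf("invalid '%s' range: uint64 overflow", s)
    }
    return offset, length, nil
}

func main() {
    off, ln, err := parseRange("1024:512")
    fmt.Println(off, ln, err) // 1024 512 <nil>
}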
@@ -1,6 +1,7 @@
 package object

 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"github.com/spf13/cobra"
 )

@@ -15,7 +16,9 @@ var Cmd = &cobra.Command{
 		// the viper before execution
 		commonflags.Bind(cmd)
 		commonflags.BindAPI(cmd)
+		common.StartClientCommandSpan(cmd)
 	},
+	PersistentPostRun: common.StopClientCommandSpan,
 }

 func init() {

@@ -27,7 +30,8 @@ func init() {
 		objectHeadCmd,
 		objectHashCmd,
 		objectRangeCmd,
-		objectLockCmd}
+		objectLockCmd,
+		objectNodesCmd}

 	Cmd.AddCommand(objectChildCommands...)

@@ -44,4 +48,5 @@ func init() {
 	initObjectHashCmd()
 	initObjectRangeCmd()
 	initCommandObjectLock()
+	initObjectNodesCmd()
 }
@@ -10,7 +10,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/spf13/cobra"
 )

@@ -61,7 +61,7 @@ func searchObject(cmd *cobra.Command, _ []string) {
 	prm.SetContainerID(cnr)
 	prm.SetFilters(sf)

-	res, err := internalclient.SearchObjects(prm)
+	res, err := internalclient.SearchObjects(cmd.Context(), prm)
 	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

 	ids := res.IDList()

@@ -72,18 +72,18 @@ func searchObject(cmd *cobra.Command, _ []string) {
 	}
 }

-var searchUnaryOpVocabulary = map[string]object.SearchMatchType{
-	"NOPRESENT": object.MatchNotPresent,
+var searchUnaryOpVocabulary = map[string]objectSDK.SearchMatchType{
+	"NOPRESENT": objectSDK.MatchNotPresent,
 }

-var searchBinaryOpVocabulary = map[string]object.SearchMatchType{
-	"EQ":            object.MatchStringEqual,
-	"NE":            object.MatchStringNotEqual,
-	"COMMON_PREFIX": object.MatchCommonPrefix,
+var searchBinaryOpVocabulary = map[string]objectSDK.SearchMatchType{
+	"EQ":            objectSDK.MatchStringEqual,
+	"NE":            objectSDK.MatchStringNotEqual,
+	"COMMON_PREFIX": objectSDK.MatchCommonPrefix,
 }

-func parseSearchFilters(cmd *cobra.Command) (object.SearchFilters, error) {
-	var fs object.SearchFilters
+func parseSearchFilters(cmd *cobra.Command) (objectSDK.SearchFilters, error) {
+	var fs objectSDK.SearchFilters

 	for i := range searchFilters {
 		words := strings.Fields(searchFilters[i])

@@ -97,7 +97,7 @@ func parseSearchFilters(cmd *cobra.Command) (object.SearchFilters, error) {
 				return nil, fmt.Errorf("could not read attributes filter from file: %w", err)
 			}

-			subFs := object.NewSearchFilters()
+			subFs := objectSDK.NewSearchFilters()

 			if err := subFs.UnmarshalJSON(data); err != nil {
 				return nil, fmt.Errorf("could not unmarshal attributes filter from file: %w", err)

@@ -138,7 +138,7 @@ func parseSearchFilters(cmd *cobra.Command) (object.SearchFilters, error) {
 			return nil, fmt.Errorf("could not parse object ID: %w", err)
 		}

-		fs.AddObjectIDFilter(object.MatchStringEqual, id)
+		fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id)
 	}

 	return fs, nil
@@ -16,7 +16,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"github.com/spf13/cobra"

@@ -87,7 +87,7 @@ func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address
 func readObjectAddressBin(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID, filename string) oid.Address {
 	buf, err := os.ReadFile(filename)
 	commonCmd.ExitOnErr(cmd, "unable to read given file: %w", err)
-	objTemp := object.New()
+	objTemp := objectSDK.New()
 	commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf))

 	var addr oid.Address

@@ -278,7 +278,7 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client

 	common.PrintVerbose(cmd, "Opening remote session with the node...")

-	err := sessionCli.CreateSession(&tok, cli, sessionLifetime)
+	err := sessionCli.CreateSession(cmd.Context(), &tok, cli, sessionLifetime)
 	commonCmd.ExitOnErr(cmd, "open remote session: %w", err)

 	common.PrintVerbose(cmd, "Session successfully opened.")

@@ -354,9 +354,9 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,

 	Prepare(cmd, &prmHead)

-	_, err := internal.HeadObject(prmHead)
+	_, err := internal.HeadObject(cmd.Context(), prmHead)

-	var errSplit *object.SplitInfoError
+	var errSplit *objectSDK.SplitInfoError

 	switch {
 	default:

@@ -381,7 +381,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
 	return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnr, obj)
 }

-func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
+func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
 	// collect split chain by the descending ease of operations (ease is evaluated heuristically).
 	// If any approach fails, we don't try the next since we assume that it will fail too.

@@ -396,7 +396,7 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
 		prmHead.SetRawFlag(false)
 		// client is already set

-		res, err := internal.HeadObject(prmHead)
+		res, err := internal.HeadObject(cmd.Context(), prmHead)
 		if err == nil {
 			children := res.Header().Children()

@@ -413,19 +413,19 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
 	return nil, false
 }

-func tryGetSplitMembersBySplitID(cmd *cobra.Command, splitInfo *object.SplitInfo, cli *client.Client, cnr cid.ID) ([]oid.ID, bool) {
+func tryGetSplitMembersBySplitID(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, cli *client.Client, cnr cid.ID) ([]oid.ID, bool) {
 	if idSplit := splitInfo.SplitID(); idSplit != nil {
 		common.PrintVerbose(cmd, "Collecting split members by split ID...")

-		var query object.SearchFilters
-		query.AddSplitIDFilter(object.MatchStringEqual, idSplit)
+		var query objectSDK.SearchFilters
+		query.AddSplitIDFilter(objectSDK.MatchStringEqual, idSplit)

 		var prm internal.SearchObjectsPrm
 		prm.SetContainerID(cnr)
 		prm.SetClient(cli)
 		prm.SetFilters(query)

-		res, err := internal.SearchObjects(prm)
+		res, err := internal.SearchObjects(cmd.Context(), prm)
 		commonCmd.ExitOnErr(cmd, "failed to search objects by split ID: %w", err)

 		parts := res.IDList()

@@ -437,7 +437,7 @@ func tryGetSplitMembersBySplitID(cmd *cobra.Command, splitInfo *object.SplitInfo, cli *client.Client, cnr cid.ID) ([]oid.ID, bool) {
 	return nil, false
 }

-func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID {
+func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID {
 	var addrObj oid.Address
 	addrObj.SetContainer(cnr)

@@ -463,7 +463,7 @@ func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID {
 		addrObj.SetObject(idMember)
 		prmHead.SetAddress(addrObj)

-		res, err = internal.HeadObject(prmHead)
+		res, err = internal.HeadObject(cmd.Context(), prmHead)
 		commonCmd.ExitOnErr(cmd, "failed to read split chain member's header: %w", err)

 		idMember, ok = res.Header().PreviousID()

@@ -482,15 +482,15 @@ func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID {

 	common.PrintVerbose(cmd, "Looking for a linking object...")

-	var query object.SearchFilters
-	query.AddParentIDFilter(object.MatchStringEqual, obj)
+	var query objectSDK.SearchFilters
+	query.AddParentIDFilter(objectSDK.MatchStringEqual, obj)

 	var prmSearch internal.SearchObjectsPrm
 	prmSearch.SetClient(cli)
 	prmSearch.SetContainerID(cnr)
 	prmSearch.SetFilters(query)

-	resSearch, err := internal.SearchObjects(prmSearch)
+	resSearch, err := internal.SearchObjects(cmd.Context(), prmSearch)
 	commonCmd.ExitOnErr(cmd, "failed to find object children: %w", err)

 	list := resSearch.IDList()
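The tryGet helpers above recover the members of a split object, and tryRestoreChainInReverse does it the hard way: walking PreviousID links from the last part back to the first. An abstract sketch of that reverse walk, with a map standing in for the per-part HEAD calls (everything here is illustrative, not frostfs-node API):

package main

import "fmt"

// restoreChain follows previous-part links from the last split part until
// no predecessor exists, returning the parts in oldest-first order.
func restoreChain(last string, prevOf map[string]string) []string {
    var chain []string
    for id, ok := last, true; ok; id, ok = prevOf[id] {
        chain = append([]string{id}, chain...) // prepend: oldest part first
    }
    return chain
}

func main() {
    prev := map[string]string{"c": "b", "b": "a"} // chain a <- b <- c
    fmt.Println(restoreChain("c", prev))          // [a b c]
}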
@@ -1,10 +1,12 @@
 package session

 import (
+	"context"
 	"fmt"
 	"os"

 	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"

@@ -28,10 +30,12 @@ var createCmd = &cobra.Command{
 	Use:   "create",
 	Short: "Create session token",
 	Run:   createSession,
-	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+	PersistentPreRun: func(cmd *cobra.Command, args []string) {
 		_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
 		_ = viper.BindPFlag(commonflags.Account, cmd.Flags().Lookup(commonflags.Account))
+		common.StartClientCommandSpan(cmd)
 	},
+	PersistentPostRun: common.StopClientCommandSpan,
 }

 func init() {

@@ -64,7 +68,7 @@ func createSession(cmd *cobra.Command, _ []string) {

 	var tok session.Object

-	err = CreateSession(&tok, c, lifetime)
+	err = CreateSession(cmd.Context(), &tok, c, lifetime)
 	commonCmd.ExitOnErr(cmd, "can't create session: %w", err)

 	var data []byte

@@ -86,11 +90,11 @@ func createSession(cmd *cobra.Command, _ []string) {
 // number of epochs.
 //
 // Fills ID, lifetime and session key.
-func CreateSession(dst *session.Object, c *client.Client, lifetime uint64) error {
+func CreateSession(ctx context.Context, dst *session.Object, c *client.Client, lifetime uint64) error {
 	var netInfoPrm internalclient.NetworkInfoPrm
 	netInfoPrm.SetClient(c)

-	ni, err := internalclient.NetworkInfo(netInfoPrm)
+	ni, err := internalclient.NetworkInfo(ctx, netInfoPrm)
 	if err != nil {
 		return fmt.Errorf("can't fetch network info: %w", err)
 	}

@@ -102,7 +106,7 @@ func CreateSession(dst *session.Object, c *client.Client, lifetime uint64) error
 	sessionPrm.SetClient(c)
 	sessionPrm.SetExp(exp)

-	sessionRes, err := internalclient.CreateSession(sessionPrm)
+	sessionRes, err := internalclient.CreateSession(ctx, sessionPrm)
 	if err != nil {
 		return fmt.Errorf("can't open session: %w", err)
 	}
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"strings"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"

@@ -49,24 +50,29 @@ func add(cmd *cobra.Command, _ []string) {
 	ctx := cmd.Context()

 	cli, err := _client(ctx)
-	commonCmd.ExitOnErr(cmd, "client: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

 	rawCID := make([]byte, sha256.Size)
 	cnr.Encode(rawCID)

+	var bt []byte
+	if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
+		bt = t.Marshal()
+	}
+
 	req := new(tree.AddRequest)
 	req.Body = &tree.AddRequest_Body{
 		ContainerId: rawCID,
 		TreeId:      tid,
 		ParentId:    pid,
 		Meta:        meta,
-		BearerToken: nil, // TODO: #1891 add token handling
+		BearerToken: bt,
 	}

-	commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
+	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

 	resp, err := cli.Add(ctx, req)
-	commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to call add: %w", err)

 	cmd.Println("Node ID: ", resp.Body.NodeId)
 }
@@ -10,7 +10,7 @@ import (
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"github.com/spf13/cobra"
 )

@@ -53,7 +53,7 @@ func addByPath(cmd *cobra.Command, _ []string) {
 	ctx := cmd.Context()

 	cli, err := _client(ctx)
-	commonCmd.ExitOnErr(cmd, "client: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

 	rawCID := make([]byte, sha256.Size)
 	cnr.Encode(rawCID)

@@ -62,23 +62,27 @@ func addByPath(cmd *cobra.Command, _ []string) {
 	commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)

 	path, _ := cmd.Flags().GetString(pathFlagKey)
 	// pAttr, _ := cmd.Flags().GetString(pathAttributeFlagKey)

+	var bt []byte
+	if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
+		bt = t.Marshal()
+	}
+
 	req := new(tree.AddByPathRequest)
 	req.Body = &tree.AddByPathRequest_Body{
 		ContainerId:   rawCID,
 		TreeId:        tid,
-		PathAttribute: object.AttributeFileName,
+		PathAttribute: objectSDK.AttributeFileName,
 		// PathAttribute: pAttr,
 		Path:        strings.Split(path, "/"),
 		Meta:        meta,
-		BearerToken: nil, // TODO: #1891 add token handling
+		BearerToken: bt,
 	}

-	commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
+	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

 	resp, err := cli.AddByPath(ctx, req)
-	commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to call addByPath: %w", err)

 	cmd.Printf("Parent ID: %d\n", resp.GetBody().GetParentId())
@@ -5,10 +5,11 @@ import (
 	"strings"
 	"time"

-	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
+	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
 	"github.com/spf13/viper"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"

@@ -26,10 +27,12 @@ func _client(ctx context.Context) (tree.TreeServiceClient, error) {
 	opts := []grpc.DialOption{
 		grpc.WithBlock(),
 		grpc.WithChainUnaryInterceptor(
-			tracing.NewGRPCUnaryClientInteceptor(),
+			metrics.NewUnaryClientInterceptor(),
+			tracing.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
-			tracing.NewGRPCStreamClientInterceptor(),
+			metrics.NewStreamClientInterceptor(),
+			tracing.NewStreamClientInterceptor(),
 		),
 	}
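With grpc.WithChainUnaryInterceptor(metrics, tracing), interceptors run left to right: the metrics interceptor wraps the tracing one, which wraps the actual invoker. A toy chain that reproduces that ordering by hand, without a server; the interceptor bodies just log, and nothing here comes from frostfs-observability.

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc"
)

// logging builds a client interceptor that prints on entry and exit,
// so the nesting order of a chain becomes visible.
func logging(name string) grpc.UnaryClientInterceptor {
    return func(ctx context.Context, method string, req, reply any,
        cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
        fmt.Println("enter", name)
        err := invoker(ctx, method, req, reply, cc, opts...)
        fmt.Println("leave", name)
        return err
    }
}

func main() {
    metrics, tracing := logging("metrics"), logging("tracing")
    invoker := func(ctx context.Context, method string, req, reply any,
        cc *grpc.ClientConn, opts ...grpc.CallOption) error {
        fmt.Println("rpc", method)
        return nil
    }
    // manual equivalent of WithChainUnaryInterceptor(metrics, tracing):
    // metrics(tracing(invoker))
    chained := func(ctx context.Context, method string, req, reply any,
        cc *grpc.ClientConn, opts ...grpc.CallOption) error {
        return metrics(ctx, method, req, reply, cc,
            func(ctx context.Context, method string, req, reply any,
                cc *grpc.ClientConn, opts ...grpc.CallOption) error {
                return tracing(ctx, method, req, reply, cc, invoker, opts...)
            }, opts...)
    }
    _ = chained(context.Background(), "/tree.TreeService/Healthcheck", nil, nil, nil)
    // prints: enter metrics, enter tracing, rpc ..., leave tracing, leave metrics
}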
@@ -10,7 +10,7 @@ import (
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"github.com/spf13/cobra"
 )

@@ -53,31 +53,35 @@ func getByPath(cmd *cobra.Command, _ []string) {
 	ctx := cmd.Context()

 	cli, err := _client(ctx)
-	commonCmd.ExitOnErr(cmd, "client: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

 	rawCID := make([]byte, sha256.Size)
 	cnr.Encode(rawCID)

 	latestOnly, _ := cmd.Flags().GetBool(latestOnlyFlagKey)
 	path, _ := cmd.Flags().GetString(pathFlagKey)
 	// pAttr, _ := cmd.Flags().GetString(pathAttributeFlagKey)

+	var bt []byte
+	if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
+		bt = t.Marshal()
+	}
+
 	req := new(tree.GetNodeByPathRequest)
 	req.Body = &tree.GetNodeByPathRequest_Body{
 		ContainerId:   rawCID,
 		TreeId:        tid,
-		PathAttribute: object.AttributeFileName,
+		PathAttribute: objectSDK.AttributeFileName,
 		// PathAttribute: pAttr,
 		Path:          strings.Split(path, "/"),
 		LatestOnly:    latestOnly,
 		AllAttributes: true,
-		BearerToken: nil, // TODO: #1891 add token handling
+		BearerToken: bt,
 	}

-	commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
+	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

 	resp, err := cli.GetNodeByPath(ctx, req)
-	commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to call getNodeByPath: %w", err)

 	nn := resp.GetBody().GetNodes()
 	if len(nn) == 0 {
83 cmd/frostfs-cli/modules/tree/get_op_log.go Normal file
@@ -0,0 +1,83 @@
package tree

import (
	"crypto/sha256"
	"errors"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"github.com/spf13/cobra"
)

var getOpLogCmd = &cobra.Command{
	Use:   "get-op-log",
	Short: "Get logged operations starting with some height",
	Run:   getOpLog,
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		commonflags.Bind(cmd)
	},
}

func initGetOpLogCmd() {
	commonflags.Init(getOpLogCmd)
	initCTID(getOpLogCmd)

	ff := getOpLogCmd.Flags()
	ff.Uint64(heightFlagKey, 0, "Height to start with")
	ff.Uint64(countFlagKey, 10, "Logged operations count")

	_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}

func getOpLog(cmd *cobra.Command, _ []string) {
	pk := key.GetOrGenerate(cmd)

	cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag)

	var cnr cid.ID
	err := cnr.DecodeString(cidRaw)
	commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)

	tid, _ := cmd.Flags().GetString(treeIDFlagKey)
	ctx := cmd.Context()

	cli, err := _client(ctx)
	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

	rawCID := make([]byte, sha256.Size)
	cnr.Encode(rawCID)

	height, _ := cmd.Flags().GetUint64(heightFlagKey)
	count, _ := cmd.Flags().GetUint64(countFlagKey)

	req := &tree.GetOpLogRequest{
		Body: &tree.GetOpLogRequest_Body{
			ContainerId: rawCID,
			TreeId:      tid,
			Height:      height,
			Count:       count,
		},
	}

	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

	resp, err := cli.GetOpLog(ctx, req)
	commonCmd.ExitOnErr(cmd, "get op log: %w", err)

	opLogResp, err := resp.Recv()
	for ; err == nil; opLogResp, err = resp.Recv() {
		o := opLogResp.GetBody().GetOperation()

		cmd.Println("Parent ID: ", o.GetParentId())

		cmd.Println("\tChild ID: ", o.GetChildId())
		cmd.Printf("\tMeta: %s\n", o.GetMeta())
	}
	if !errors.Is(err, io.EOF) {
		commonCmd.ExitOnErr(cmd, "get op log response stream: %w", err)
	}
}
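get-op-log, get-subtree, and move all drain a server-side gRPC stream the same way: call Recv until it fails, then treat io.EOF as the normal end of stream and anything else as an error. The loop lifted into a generic helper; drain and the fake recv below are illustrative, not part of the codebase.

package main

import (
    "errors"
    "fmt"
    "io"
)

// drain reads messages until recv fails; io.EOF means clean termination.
func drain[T any](recv func() (T, error), handle func(T)) error {
    msg, err := recv()
    for ; err == nil; msg, err = recv() {
        handle(msg)
    }
    if !errors.Is(err, io.EOF) {
        return err // a real failure, not normal end of stream
    }
    return nil
}

func main() {
    msgs := []string{"op1", "op2"}
    i := 0
    err := drain(func() (string, error) {
        if i == len(msgs) {
            return "", io.EOF
        }
        i++
        return msgs[i-1], nil
    }, func(m string) { fmt.Println(m) })
    fmt.Println("err:", err) // err: <nil>
}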
43 cmd/frostfs-cli/modules/tree/healthcheck.go Normal file
@@ -0,0 +1,43 @@
package tree

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
	"github.com/spf13/cobra"
)

var healthcheckCmd = &cobra.Command{
	Use:   "healthcheck",
	Short: "Check tree service availability",
	Run:   healthcheck,
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		commonflags.Bind(cmd)
	},
}

func initHealthcheckCmd() {
	commonflags.Init(healthcheckCmd)
	ff := healthcheckCmd.Flags()
	_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}

func healthcheck(cmd *cobra.Command, _ []string) {
	pk := key.GetOrGenerate(cmd)
	ctx := cmd.Context()

	cli, err := _client(ctx)
	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

	req := &tree.HealthcheckRequest{
		Body: &tree.HealthcheckRequest_Body{},
	}
	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

	_, err = cli.Healthcheck(ctx, req)
	commonCmd.ExitOnErr(cmd, "failed to call healthcheck: %w", err)

	common.PrintVerbose(cmd, "Successful healthcheck invocation.")
}
@@ -41,7 +41,7 @@ func list(cmd *cobra.Command, _ []string) {
 	ctx := cmd.Context()

 	cli, err := _client(ctx)
-	commonCmd.ExitOnErr(cmd, "client: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

 	rawCID := make([]byte, sha256.Size)
 	cnr.Encode(rawCID)

@@ -52,10 +52,10 @@ func list(cmd *cobra.Command, _ []string) {
 		},
 	}

-	commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
+	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

 	resp, err := cli.TreeList(ctx, req)
-	commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
+	commonCmd.ExitOnErr(cmd, "failed to call treeList: %w", err)

 	for _, treeID := range resp.GetBody().GetIds() {
 		cmd.Println(treeID)
107 cmd/frostfs-cli/modules/tree/move.go Normal file
@@ -0,0 +1,107 @@
package tree

import (
	"crypto/sha256"
	"errors"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"github.com/spf13/cobra"
)

var moveCmd = &cobra.Command{
	Use:   "move",
	Short: "Move node",
	Run:   move,
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		commonflags.Bind(cmd)
	},
}

func initMoveCmd() {
	commonflags.Init(moveCmd)
	initCTID(moveCmd)

	ff := moveCmd.Flags()
	ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
	ff.Uint64(parentIDFlagKey, 0, "Parent ID.")

	_ = moveCmd.MarkFlagRequired(nodeIDFlagKey)
	_ = moveCmd.MarkFlagRequired(parentIDFlagKey)

	_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}

func move(cmd *cobra.Command, _ []string) {
	pk := key.GetOrGenerate(cmd)
	cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)

	var cnr cid.ID
	err := cnr.DecodeString(cidString)
	commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)

	ctx := cmd.Context()

	cli, err := _client(ctx)
	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

	rawCID := make([]byte, sha256.Size)
	cnr.Encode(rawCID)

	tid, _ := cmd.Flags().GetString(treeIDFlagKey)
	pid, _ := cmd.Flags().GetUint64(parentIDFlagKey)
	nid, _ := cmd.Flags().GetUint64(nodeIDFlagKey)

	var bt []byte
	if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
		bt = t.Marshal()
	}

	subTreeReq := &tree.GetSubTreeRequest{
		Body: &tree.GetSubTreeRequest_Body{
			ContainerId: rawCID,
			TreeId:      tid,
			RootId:      nid,
			Depth:       1,
			BearerToken: bt,
		},
	}
	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(subTreeReq, pk))
	resp, err := cli.GetSubTree(ctx, subTreeReq)
	commonCmd.ExitOnErr(cmd, "rpc call: %w", err)

	var meta []*tree.KeyValue
	subtreeResp, err := resp.Recv()
	for ; err == nil; subtreeResp, err = resp.Recv() {
		meta = subtreeResp.GetBody().GetMeta()
	}
	if !errors.Is(err, io.EOF) {
		commonCmd.ExitOnErr(cmd, "failed to read getSubTree response stream: %w", err)
	}
	var metaErr error
	if len(meta) == 0 {
		metaErr = errors.New("no meta for given node ID")
	}
	commonCmd.ExitOnErr(cmd, "unexpected rpc call result: %w", metaErr)

	req := &tree.MoveRequest{
		Body: &tree.MoveRequest_Body{
			ContainerId: rawCID,
			TreeId:      tid,
			ParentId:    pid,
			NodeId:      nid,
			Meta:        meta,
		},
	}

	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

	_, err = cli.Move(ctx, req)
	commonCmd.ExitOnErr(cmd, "failed to call move: %w", err)
	common.PrintVerbose(cmd, "Successful move invocation.")
}
74 cmd/frostfs-cli/modules/tree/remove.go Normal file
@@ -0,0 +1,74 @@
package tree

import (
	"crypto/sha256"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"github.com/spf13/cobra"
)

var removeCmd = &cobra.Command{
	Use:   "remove",
	Short: "Remove node",
	Run:   remove,
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		commonflags.Bind(cmd)
	},
}

func initRemoveCmd() {
	commonflags.Init(removeCmd)
	initCTID(removeCmd)

	ff := removeCmd.Flags()
	ff.Uint64(nodeIDFlagKey, 0, "Node ID.")

	_ = removeCmd.MarkFlagRequired(nodeIDFlagKey)

	_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}

func remove(cmd *cobra.Command, _ []string) {
	pk := key.GetOrGenerate(cmd)
	cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)

	var cnr cid.ID
	err := cnr.DecodeString(cidString)
	commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)

	ctx := cmd.Context()

	cli, err := _client(ctx)
	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

	rawCID := make([]byte, sha256.Size)
	cnr.Encode(rawCID)

	tid, _ := cmd.Flags().GetString(treeIDFlagKey)

	nid, _ := cmd.Flags().GetUint64(nodeIDFlagKey)

	var bt []byte
	if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
		bt = t.Marshal()
	}
	req := &tree.RemoveRequest{
		Body: &tree.RemoveRequest_Body{
			ContainerId: rawCID,
			TreeId:      tid,
			NodeId:      nid,
			BearerToken: bt,
		},
	}

	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

	_, err = cli.Remove(ctx, req)
	commonCmd.ExitOnErr(cmd, "failed to call remove: %w", err)
	common.PrintVerbose(cmd, "Successful remove invocation.")
}
@@ -15,16 +15,28 @@ func init() {
 	Cmd.AddCommand(getByPathCmd)
 	Cmd.AddCommand(addByPathCmd)
 	Cmd.AddCommand(listCmd)
+	Cmd.AddCommand(healthcheckCmd)
+	Cmd.AddCommand(moveCmd)
+	Cmd.AddCommand(removeCmd)
+	Cmd.AddCommand(getSubtreeCmd)
+	Cmd.AddCommand(getOpLogCmd)

 	initAddCmd()
 	initGetByPathCmd()
 	initAddByPathCmd()
 	initListCmd()
+	initHealthcheckCmd()
+	initMoveCmd()
+	initRemoveCmd()
+	initGetSubtreeCmd()
+	initGetOpLogCmd()
 }

 const (
 	treeIDFlagKey   = "tid"
 	parentIDFlagKey = "pid"
+	nodeIDFlagKey   = "nid"
+	rootIDFlagKey   = "root"

 	metaFlagKey = "meta"

@@ -32,6 +44,12 @@ const (
 	pathAttributeFlagKey = "pattr"

 	latestOnlyFlagKey = "latest"
+
+	bearerFlagKey = "bearer"
+
+	heightFlagKey = "height"
+	countFlagKey  = "count"
+	depthFlagKey  = "depth"
 )

 func initCTID(cmd *cobra.Command) {

@@ -42,4 +60,6 @@ func initCTID(cmd *cobra.Command) {

 	ff.String(treeIDFlagKey, "", "Tree ID")
 	_ = cmd.MarkFlagRequired(treeIDFlagKey)
+
+	ff.String(bearerFlagKey, "", "Path to bearer token")
 }
101 cmd/frostfs-cli/modules/tree/subtree.go Normal file
@@ -0,0 +1,101 @@
package tree

import (
	"crypto/sha256"
	"errors"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"github.com/spf13/cobra"
)

var getSubtreeCmd = &cobra.Command{
	Use:   "get-subtree",
	Short: "Get subtree",
	Run:   getSubTree,
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		commonflags.Bind(cmd)
	},
}

func initGetSubtreeCmd() {
	commonflags.Init(getSubtreeCmd)
	initCTID(getSubtreeCmd)

	ff := getSubtreeCmd.Flags()
	ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.")
	ff.Uint32(depthFlagKey, 10, "Traversal depth.")

	_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
	_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)

	_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}

func getSubTree(cmd *cobra.Command, _ []string) {
	pk := key.GetOrGenerate(cmd)
	cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)

	var cnr cid.ID
	err := cnr.DecodeString(cidString)
	commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)

	ctx := cmd.Context()

	cli, err := _client(ctx)
	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)

	rawCID := make([]byte, sha256.Size)
	cnr.Encode(rawCID)

	tid, _ := cmd.Flags().GetString(treeIDFlagKey)

	rid, _ := cmd.Flags().GetUint64(rootIDFlagKey)

	depth, _ := cmd.Flags().GetUint32(depthFlagKey)

	var bt []byte
	if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
		bt = t.Marshal()
	}

	req := &tree.GetSubTreeRequest{
		Body: &tree.GetSubTreeRequest_Body{
			ContainerId: rawCID,
			TreeId:      tid,
			RootId:      rid,
			Depth:       depth,
			BearerToken: bt,
		},
	}

	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))

	resp, err := cli.GetSubTree(ctx, req)
	commonCmd.ExitOnErr(cmd, "failed to call getSubTree: %w", err)

	subtreeResp, err := resp.Recv()
	for ; err == nil; subtreeResp, err = resp.Recv() {
		b := subtreeResp.GetBody()

		cmd.Printf("Node ID: %d\n", b.GetNodeId())

		cmd.Println("\tParent ID: ", b.GetParentId())
		cmd.Println("\tTimestamp: ", b.GetTimestamp())

		if meta := b.GetMeta(); len(meta) > 0 {
			cmd.Println("\tMeta pairs: ")
			for _, kv := range meta {
				cmd.Printf("\t\t%s: %s\n", kv.GetKey(), string(kv.GetValue()))
			}
		}
	}
	if !errors.Is(err, io.EOF) {
		commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
	}
}
@@ -60,6 +60,13 @@ func watchForSignal(cancel func()) {
 			if err != nil {
 				log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
 			}
+			pprofCmp.reload()
+			metricsCmp.reload()
+			log.Info(logs.FrostFSIRReloadExtraWallets)
+			err = innerRing.SetExtraWallets(cfg)
+			if err != nil {
+				log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+			}
 			log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 		case syscall.SIGTERM, syscall.SIGINT:
 			log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
@ -51,9 +51,8 @@ func setControlDefaults(cfg *viper.Viper) {
|
|||
|
||||
func setFeeDefaults(cfg *viper.Viper) {
|
||||
// extra fee values for working mode without notary contract
|
||||
cfg.SetDefault("fee.main_chain", 5000_0000) // 0.5 Fixed8
|
||||
cfg.SetDefault("fee.side_chain", 2_0000_0000) // 2.0 Fixed8
|
||||
cfg.SetDefault("fee.named_container_register", 25_0000_0000) // 25.0 Fixed8
|
||||
cfg.SetDefault("fee.main_chain", 5000_0000) // 0.5 Fixed8
|
||||
cfg.SetDefault("fee.side_chain", 2_0000_0000) // 2.0 Fixed8
|
||||
}
|
||||
|
||||
func setEmitDefaults(cfg *viper.Viper) {
|
||||
|
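For readers unfamiliar with the comments above: Fixed8 is an 8-decimal fixed-point encoding, so the stored integer is the decimal value scaled by 10^8. A tiny illustrative check (not part of the diff):

package main

import "fmt"

// Fixed8 values are integers scaled by 1e8 (8 decimal places).
const fixed8Unit = 1_0000_0000

func main() {
	fmt.Println(5000_0000 / float64(fixed8Unit))    // 0.5  (fee.main_chain)
	fmt.Println(2_0000_0000 / float64(fixed8Unit))  // 2    (fee.side_chain)
	fmt.Println(25_0000_0000 / float64(fixed8Unit)) // 25   (named_container_register)
}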
cmd/frostfs-ir/httpcomponent.go (new file, 87 lines)

@@ -0,0 +1,87 @@
package main

import (
	"fmt"
	"net/http"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
	"go.uber.org/zap"
)

type httpComponent struct {
	srv         *httputil.Server
	address     string
	name        string
	handler     http.Handler
	shutdownDur time.Duration
	enabled     bool
}

const (
	enabledKeyPostfix         = ".enabled"
	addressKeyPostfix         = ".address"
	shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)

func (c *httpComponent) init() {
	log.Info(fmt.Sprintf("init %s", c.name))
	c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
	c.address = cfg.GetString(c.name + addressKeyPostfix)
	c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)

	if c.enabled {
		c.srv = httputil.New(
			httputil.HTTPSrvPrm{
				Address: c.address,
				Handler: c.handler,
			},
			httputil.WithShutdownTimeout(c.shutdownDur),
		)
	} else {
		log.Info(fmt.Sprintf("%s is disabled, skip", c.name))
		c.srv = nil
	}
}

func (c *httpComponent) start() {
	if c.srv != nil {
		log.Info(fmt.Sprintf("start %s", c.name))
		wg.Add(1)
		go func() {
			defer wg.Done()
			exitErr(c.srv.Serve())
		}()
	}
}

func (c *httpComponent) shutdown() error {
	if c.srv != nil {
		log.Info(fmt.Sprintf("shutdown %s", c.name))
		return c.srv.Shutdown()
	}
	return nil
}

func (c *httpComponent) needReload() bool {
	enabled := cfg.GetBool(c.name + enabledKeyPostfix)
	address := cfg.GetString(c.name + addressKeyPostfix)
	dur := cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
	return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}

func (c *httpComponent) reload() {
	log.Info(fmt.Sprintf("reload %s", c.name))
	if c.needReload() {
		log.Info(fmt.Sprintf("%s config updated", c.name))
		if err := c.shutdown(); err != nil {
			log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
				zap.String("error", err.Error()),
			)
		} else {
			c.init()
			c.start()
		}
	}
}
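Because httpComponent resolves all of its settings from the shared cfg by name prefix, wiring another endpoint is just a matter of filling in a new name and handler, as newMetricsComponent and newPprofComponent below do. A hypothetical sketch in the same package (the "healthz" prefix and handler are illustrative, not part of this change):

// newHealthComponent would read "healthz.enabled", "healthz.address" and
// "healthz.shutdown_timeout" from the shared viper config, like the
// components above; the name and handler here are hypothetical.
func newHealthComponent() *httpComponent {
	return &httpComponent{
		name: "healthz",
		handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
	}
}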
@@ -4,16 +4,13 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"net/http"
 	"os"
 	"sync"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
-	httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/spf13/viper"
 	"go.uber.org/zap"
 )

@@ -29,15 +26,16 @@ const (
 )

 var (
-	wg          = new(sync.WaitGroup)
-	intErr      = make(chan error) // internal inner ring errors
-	logPrm      = new(logger.Prm)
-	innerRing   *innerring.Server
-	httpServers []*httputil.Server
-	log         *logger.Logger
-	cfg         *viper.Viper
-	configFile  *string
-	configDir   *string
+	wg         = new(sync.WaitGroup)
+	intErr     = make(chan error) // internal inner ring errors
+	logPrm     = new(logger.Prm)
+	innerRing  *innerring.Server
+	pprofCmp   *pprofComponent
+	metricsCmp *httpComponent
+	log        *logger.Logger
+	cfg        *viper.Viper
+	configFile *string
+	configDir  *string
 )

 func exitErr(err error) {

@@ -63,6 +61,7 @@ func main() {
 	cfg, err = newConfig()
 	exitErr(err)

+	logPrm.MetricsNamespace = "frostfs_ir"
 	err = logPrm.SetLevelString(
 		cfg.GetString("logger.level"),
 	)

@@ -73,19 +72,17 @@ func main() {
 	ctx, cancel := context.WithCancel(context.Background())

-	initHTTPServers(cfg)
+	pprofCmp = newPprofComponent()
+	pprofCmp.init()
+
+	metricsCmp = newMetricsComponent()
+	metricsCmp.init()

 	innerRing, err = innerring.New(ctx, log, cfg, intErr)
 	exitErr(err)

-	// start HTTP servers
-	for _, srv := range httpServers {
-		wg.Add(1)
-		go func(srv *httputil.Server) {
-			exitErr(srv.Serve())
-			wg.Done()
-		}(srv)
-	}
+	pprofCmp.start()
+	metricsCmp.start()

 	// start inner ring
 	err = innerRing.Start(ctx, intErr)

@@ -103,54 +100,16 @@ func main() {
 	log.Info(logs.FrostFSIRApplicationStopped)
 }

-func initHTTPServers(cfg *viper.Viper) {
-	items := []struct {
-		cfgPrefix string
-		handler   func() http.Handler
-	}{
-		{"pprof", httputil.Handler},
-		{"prometheus", promhttp.Handler},
-	}
-
-	httpServers = make([]*httputil.Server, 0, len(items))
-
-	for _, item := range items {
-		if !cfg.GetBool(item.cfgPrefix + ".enabled") {
-			log.Info(item.cfgPrefix + " is disabled, skip")
-			continue
-		}
-
-		addr := cfg.GetString(item.cfgPrefix + ".address")
-
-		var prm httputil.HTTPSrvPrm
-
-		prm.Address = addr
-		prm.Handler = item.handler()
-
-		httpServers = append(httpServers,
-			httputil.New(prm,
-				httputil.WithShutdownTimeout(
-					cfg.GetDuration(item.cfgPrefix+".shutdown_timeout"),
-				),
-			),
-		)
-	}
-}
-
-func shutdown() {
-	innerRing.Stop()
-
-	// shut down HTTP servers
-	for _, srv := range httpServers {
-		wg.Add(1)
-		go func(srv *httputil.Server) {
-			err := srv.Shutdown()
-			if err != nil {
-				log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
-					zap.String("error", err.Error()),
-				)
-			}
-			wg.Done()
-		}(srv)
-	}
-}
+func shutdown() {
+	innerRing.Stop()
+	if err := metricsCmp.shutdown(); err != nil {
+		log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+			zap.String("error", err.Error()),
+		)
+	}
+
+	if err := pprofCmp.shutdown(); err != nil {
+		log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+			zap.String("error", err.Error()),
+		)
+	}
+}
cmd/frostfs-ir/metrics.go (new file, 12 lines)

@@ -0,0 +1,12 @@
package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
)

func newMetricsComponent() *httpComponent {
	return &httpComponent{
		name:    "prometheus",
		handler: metrics.Handler(),
	}
}
cmd/frostfs-ir/pprof.go (new file, 68 lines)

@@ -0,0 +1,68 @@
package main

import (
	"fmt"
	"runtime"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
	"go.uber.org/zap"
)

type pprofComponent struct {
	httpComponent
	blockRate int
	mutexRate int
}

const (
	pprofBlockRateKey = "pprof.block_rate"
	pprofMutexRateKey = "pprof.mutex_rate"
)

func newPprofComponent() *pprofComponent {
	return &pprofComponent{
		httpComponent: httpComponent{
			name:    "pprof",
			handler: httputil.Handler(),
		},
	}
}

func (c *pprofComponent) init() {
	c.httpComponent.init()

	if c.enabled {
		c.blockRate = cfg.GetInt(pprofBlockRateKey)
		c.mutexRate = cfg.GetInt(pprofMutexRateKey)
		runtime.SetBlockProfileRate(c.blockRate)
		runtime.SetMutexProfileFraction(c.mutexRate)
	} else {
		c.blockRate = 0
		c.mutexRate = 0
		runtime.SetBlockProfileRate(0)
		runtime.SetMutexProfileFraction(0)
	}
}

func (c *pprofComponent) needReload() bool {
	blockRate := cfg.GetInt(pprofBlockRateKey)
	mutexRate := cfg.GetInt(pprofMutexRateKey)
	return c.httpComponent.needReload() ||
		c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}

func (c *pprofComponent) reload() {
	log.Info(fmt.Sprintf("reload %s", c.name))
	if c.needReload() {
		log.Info(fmt.Sprintf("%s config updated", c.name))
		if err := c.shutdown(); err != nil {
			log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
				zap.String("error", err.Error()))
			return
		}

		c.init()
		c.start()
	}
}
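The zero values in the disabled branch are meaningful: runtime.SetBlockProfileRate(0) turns block profiling off entirely, and runtime.SetMutexProfileFraction(0) turns off mutex contention sampling, so a disabled component leaves no profiling overhead behind. A standalone sketch of the two knobs (rates are illustrative):

package main

import "runtime"

func main() {
	// Sample one blocking event per 10,000 ns spent blocked (illustrative).
	runtime.SetBlockProfileRate(10_000)
	// Report roughly 1 in 10 mutex contention events (illustrative).
	runtime.SetMutexProfileFraction(10)

	// ... run the workload, scrape /debug/pprof/block and /debug/pprof/mutex ...

	// Zero turns both profiles off again, as the disabled branch above does.
	runtime.SetBlockProfileRate(0)
	runtime.SetMutexProfileFraction(0)
}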
@@ -3,7 +3,7 @@ package blobovnicza
 import (
 	common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/spf13/cobra"
 )

@@ -38,7 +38,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
 	data := res.Object()

-	var o object.Object
+	var o objectSDK.Object
 	common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w",
 		o.Unmarshal(data)),
 	)
@@ -1,6 +1,7 @@
 package blobovnicza

 import (
+	"context"
 	"fmt"
 	"io"

@@ -33,6 +34,6 @@ func listFunc(cmd *cobra.Command, _ []string) {
 	blz := openBlobovnicza(cmd)
 	defer blz.Close()

-	err := blobovnicza.IterateAddresses(blz, wAddr)
+	err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
 	common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))
 }
@@ -7,7 +7,7 @@ import (
 	common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/spf13/cobra"
 )

@@ -49,7 +49,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
 	prm.SetAddress(addr)
 	prm.SetRaw(true)

-	siErr := new(object.SplitInfoError)
+	siErr := new(objectSDK.SplitInfoError)

 	res, err := db.Get(cmd.Context(), prm)
 	if errors.As(err, &siErr) {
@@ -28,6 +28,6 @@ func listGarbageFunc(cmd *cobra.Command, _ []string) {
 		return nil
 	})

-	err := db.IterateOverGarbage(garbPrm)
+	err := db.IterateOverGarbage(cmd.Context(), garbPrm)
 	common.ExitOnErr(cmd, common.Errf("could not iterate over garbage bucket: %w", err))
 }

@@ -33,6 +33,6 @@ func listGraveyardFunc(cmd *cobra.Command, _ []string) {
 		return nil
 	})

-	err := db.IterateOverGraveyard(gravePrm)
+	err := db.IterateOverGraveyard(cmd.Context(), gravePrm)
 	common.ExitOnErr(cmd, common.Errf("could not iterate over graveyard bucket: %w", err))
 }
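These frostfs-lens hunks all make the same mechanical change: iteration helpers now take a context.Context as their first argument, following the usual Go convention. A minimal sketch of threading a context through an iterator (the helper is hypothetical, not the repository's API):

package main

import (
	"context"
	"fmt"
)

// iterate visits items until the context is cancelled; ctx goes first,
// matching the convention the diff migrates these helpers to.
func iterate(ctx context.Context, items []string, fn func(string) error) error {
	for _, it := range items {
		select {
		case <-ctx.Done():
			return ctx.Err() // stop early on cancellation
		default:
		}
		if err := fn(it); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = iterate(context.Background(), []string{"a", "b"}, func(s string) error {
		fmt.Println(s)
		return nil
	})
}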
@@ -4,14 +4,14 @@ import (
 	"os"

 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/spf13/cobra"
 )

 // PrintObjectHeader prints passed object's header fields via
 // the passed cobra command. Does nothing with the payload.
-func PrintObjectHeader(cmd *cobra.Command, h object.Object) {
+func PrintObjectHeader(cmd *cobra.Command, h objectSDK.Object) {
 	cmd.Println("Version:", h.Version())
 	cmd.Println("Type:", h.Type())
 	printContainerID(cmd, h.ContainerID)
@@ -3,7 +3,7 @@ package writecache
 import (
 	common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"github.com/spf13/cobra"
 )

@@ -27,7 +27,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
 	data, err := writecache.Get(db, []byte(vAddress))
 	common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))

-	var o object.Object
+	var o objectSDK.Object
 	common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w", o.Unmarshal(data)))

 	common.PrintObjectHeader(cmd, o)
@@ -21,10 +21,8 @@ func initAccountingService(ctx context.Context, c *cfg) {
 	server := accountingTransportGRPC.New(
 		accountingService.NewSignService(
 			&c.key.PrivateKey,
-			accountingService.NewResponseService(
-				accountingService.NewExecutionService(
-					accounting.NewExecutor(balanceMorphWrapper),
-				),
+			accountingService.NewExecutionService(
+				accounting.NewExecutor(balanceMorphWrapper),
+				c.respSvc,
 			),
 		),
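The effect of this hunk is to flatten the service decorator chain: response handling is no longer a separate wrapper around the execution service but a dependency (c.respSvc) passed into it. Schematically (names shortened, illustrative only):

// Before: svc := NewSignService(key, NewResponseService(NewExecutionService(exec)))
// After:  svc := NewSignService(key, NewExecutionService(exec, respSvc))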
@@ -25,19 +25,18 @@ type valueWithTime[V any] struct {
 }

 type locker struct {
-	mtx     *sync.Mutex
+	mtx     sync.Mutex
 	waiters int // not protected by mtx, must used outer mutex to update concurrently
 }

 type keyLocker[K comparable] struct {
 	lockers    map[K]*locker
-	lockersMtx *sync.Mutex
+	lockersMtx sync.Mutex
 }

 func newKeyLocker[K comparable]() *keyLocker[K] {
 	return &keyLocker[K]{
-		lockers:    make(map[K]*locker),
-		lockersMtx: &sync.Mutex{},
+		lockers: make(map[K]*locker),
 	}
 }

@@ -53,7 +52,6 @@ func (l *keyLocker[K]) LockKey(key K) {
 	}

 	locker := &locker{
-		mtx:     &sync.Mutex{},
 		waiters: 1,
 	}
 	locker.mtx.Lock()
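The change embeds the mutexes by value, which drops a pointer indirection and one allocation per locker; a zero-value sync.Mutex is already an unlocked, usable mutex. A small standalone illustration of the difference (not the repository's code; note that a struct holding a mutex by value must not be copied after first use):

package main

import "sync"

type byValue struct {
	mtx sync.Mutex // zero value is an unlocked mutex: no constructor needed
}

type byPointer struct {
	mtx *sync.Mutex // must be allocated before use: one extra allocation
}

func main() {
	var v byValue
	v.mtx.Lock() // works immediately
	v.mtx.Unlock()

	p := byPointer{mtx: &sync.Mutex{}} // forgetting this panics on Lock
	p.mtx.Lock()
	p.mtx.Unlock()
}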
@@ -11,12 +11,11 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
-	atomicstd "sync/atomic"
+	"sync/atomic"
 	"syscall"
 	"time"

 	netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
 	contractsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/contracts"

@@ -37,6 +36,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+	lsmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metrics"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	shardmode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"

@@ -60,6 +60,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
+	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"

@@ -68,7 +69,6 @@ import (
 	neogoutil "github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/panjf2000/ants/v2"
 	"go.etcd.io/bbolt"
-	"go.uber.org/atomic"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
 )
@@ -98,6 +98,7 @@ type applicationConfiguration struct {
 		errorThreshold uint32
 		shardPoolSize  uint32
 		shards         []shardCfg
+		lowMem         bool
 	}
 }

@@ -200,6 +201,7 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {

 	a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
 	a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
+	a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)

 	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
 }
@@ -321,20 +323,22 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s
 // helpers and fields.
 type internals struct {
 	done        chan struct{}
 	ctxCancel   func()
 	internalErr chan error // channel for internal application errors at runtime

 	appCfg *config.Config

 	log *logger.Logger

-	wg      *sync.WaitGroup
+	wg      sync.WaitGroup
 	workers []worker
 	closers []closer

 	apiVersion   version.Version
 	healthStatus *atomic.Int32
 	// is node under maintenance
-	isMaintenance atomic.Bool
+	isMaintenance     atomic.Bool
+	isOnlineCandidate bool
 }

 // starts node's maintenance.
@@ -373,7 +377,7 @@ type shared struct {
 	ownerIDFromKey user.ID // user ID calculated from key

 	// current network map
-	netMap       atomicstd.Value // type netmap.NetMap
+	netMap       atomic.Value // type netmap.NetMap
 	netMapSource netmapCore.Source

 	cnrClient *containerClient.Client
@@ -542,6 +546,8 @@ func initCfg(appCfg *config.Config) *cfg {
 	logPrm, err := c.loggerPrm()
 	fatalOnErr(err)

+	logPrm.MetricsNamespace = "frostfs_node"
+
 	log, err := logger.NewLogger(logPrm)
 	fatalOnErr(err)
@@ -581,14 +587,16 @@ func initCfg(appCfg *config.Config) *cfg {
 }

 func initInternals(appCfg *config.Config, log *logger.Logger) internals {
+	var healthStatus atomic.Int32
+	healthStatus.Store(int32(control.HealthStatus_HEALTH_STATUS_UNDEFINED))
+
 	return internals{
 		done:        make(chan struct{}),
 		appCfg:      appCfg,
 		internalErr: make(chan error),
 		log:         log,
-		wg:          new(sync.WaitGroup),
 		apiVersion:  version.Current(),
-		healthStatus: atomic.NewInt32(int32(control.HealthStatus_HEALTH_STATUS_UNDEFINED)),
+		healthStatus: &healthStatus,
 	}
 }
@@ -614,7 +622,7 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
 		key:          key,
 		binPublicKey: key.PublicKey().Bytes(),
 		localAddr:    netAddr,
-		respSvc:      response.NewService(response.WithNetworkState(netState)),
+		respSvc:      response.NewService(netState),
 		clientCache:    cache.NewSDKClientCache(cacheOpts),
 		bgClientCache:  cache.NewSDKClientCache(cacheOpts),
 		putClientCache: cache.NewSDKClientCache(cacheOpts),
@@ -626,12 +634,14 @@ func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) c
 	netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
 	fatalOnErr(err)

+	var reBootstrapTurnedOff atomic.Bool
+	reBootstrapTurnedOff.Store(relayOnly)
 	return cfgNetmap{
 		scriptHash:    contractsconfig.Netmap(appCfg),
 		state:         netState,
 		workerPool:    netmapWorkerPool,
 		needBootstrap: !relayOnly,
-		reBoostrapTurnedOff: atomic.NewBool(relayOnly),
+		reBoostrapTurnedOff: &reBootstrapTurnedOff,
 	}
 }
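This hunk, like the initInternals one above, is part of dropping go.uber.org/atomic in favour of the standard library's sync/atomic types (available since Go 1.19). The stdlib types have no NewBool/NewInt32 constructors, so a non-zero initial value is set with Store before the pointer is handed out. A standalone sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// go.uber.org/atomic style (illustrative): b := atomic.NewBool(true)
	// stdlib style: declare the zero value, then Store the initial state.
	var b atomic.Bool
	b.Store(true)

	var status atomic.Int32
	status.Store(42)

	fmt.Println(b.Load(), status.Load()) // true 42
}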
@@ -668,12 +678,12 @@ func (c *cfg) engineOpts() []engine.Option {
 	opts = append(opts,
 		engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
 		engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
-
 		engine.WithLogger(c.log),
+		engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
 	)

 	if c.metricsCollector != nil {
-		opts = append(opts, engine.WithMetrics(c.metricsCollector))
+		opts = append(opts, engine.WithMetrics(c.metricsCollector.Engine()))
 	}

 	return opts
@@ -722,6 +732,9 @@ func (c *cfg) getPiloramaOpts(shCfg shardCfg) []pilorama.Option {
 			pilorama.WithMaxBatchSize(prRead.maxBatchSize),
 			pilorama.WithMaxBatchDelay(prRead.maxBatchDelay),
 		)
+		if c.metricsCollector != nil {
+			piloramaOpts = append(piloramaOpts, pilorama.WithMetrics(lsmetrics.NewPiloramaMetrics(c.metricsCollector.PiloramaMetrics())))
+		}
 	}
 	return piloramaOpts
 }
@@ -731,27 +744,46 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 	for _, sRead := range shCfg.subStorages {
 		switch sRead.typ {
 		case blobovniczatree.Type:
-			ss = append(ss, blobstor.SubStorage{
-				Storage: blobovniczatree.NewBlobovniczaTree(
-					blobovniczatree.WithRootPath(sRead.path),
-					blobovniczatree.WithPermissions(sRead.perm),
-					blobovniczatree.WithBlobovniczaSize(sRead.size),
-					blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
-					blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
-					blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
-					blobovniczatree.WithLogger(c.log)),
+			blobTreeOpts := []blobovniczatree.Option{
+				blobovniczatree.WithRootPath(sRead.path),
+				blobovniczatree.WithPermissions(sRead.perm),
+				blobovniczatree.WithBlobovniczaSize(sRead.size),
+				blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
+				blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
+				blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
+				blobovniczatree.WithLogger(c.log),
+			}
+
+			if c.metricsCollector != nil {
+				blobTreeOpts = append(blobTreeOpts,
+					blobovniczatree.WithMetrics(
+						lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobobvnizcaTreeMetrics()),
+					),
+				)
+			}
+			ss = append(ss, blobstor.SubStorage{
+				Storage: blobovniczatree.NewBlobovniczaTree(blobTreeOpts...),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
 					return uint64(len(data)) < shCfg.smallSizeObjectLimit
 				},
 			})
 		case fstree.Type:
+			fstreeOpts := []fstree.Option{
+				fstree.WithPath(sRead.path),
+				fstree.WithPerm(sRead.perm),
+				fstree.WithDepth(sRead.depth),
+				fstree.WithNoSync(sRead.noSync),
+			}
+			if c.metricsCollector != nil {
+				fstreeOpts = append(fstreeOpts,
+					fstree.WithMetrics(
+						lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()),
+					),
+				)
+			}
+
 			ss = append(ss, blobstor.SubStorage{
-				Storage: fstree.New(
-					fstree.WithPath(sRead.path),
-					fstree.WithPerm(sRead.perm),
-					fstree.WithDepth(sRead.depth),
-					fstree.WithNoSync(sRead.noSync)),
+				Storage: fstree.New(fstreeOpts...),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
 					return true
 				},
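The refactor in this hunk (and the getShardOpts one below) switches from passing options inline to building an option slice first, which makes conditional options such as metrics straightforward to add. A generic standalone sketch of the pattern, with hypothetical names:

package main

import "fmt"

// Option configures a hypothetical store.
type Option func(*store)

type store struct {
	path    string
	metrics bool
}

func WithPath(p string) Option { return func(s *store) { s.path = p } }
func WithMetrics() Option      { return func(s *store) { s.metrics = true } }

func New(opts ...Option) *store {
	s := &store{}
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	collectMetrics := true

	// Build the option slice first, append conditionally, then expand.
	opts := []Option{WithPath("/data")}
	if collectMetrics {
		opts = append(opts, WithMetrics())
	}
	fmt.Printf("%+v\n", *New(opts...))
}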
@@ -769,31 +801,39 @@ func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
 	piloramaOpts := c.getPiloramaOpts(shCfg)
 	ss := c.getSubstorageOpts(shCfg)

+	blobstoreOpts := []blobstor.Option{
+		blobstor.WithCompressObjects(shCfg.compress),
+		blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
+		blobstor.WithStorages(ss),
+		blobstor.WithLogger(c.log),
+	}
+	if c.metricsCollector != nil {
+		blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore())))
+	}
+
+	mbOptions := []meta.Option{
+		meta.WithPath(shCfg.metaCfg.path),
+		meta.WithPermissions(shCfg.metaCfg.perm),
+		meta.WithMaxBatchSize(shCfg.metaCfg.maxBatchSize),
+		meta.WithMaxBatchDelay(shCfg.metaCfg.maxBatchDelay),
+		meta.WithBoltDBOptions(&bbolt.Options{
+			Timeout: 100 * time.Millisecond,
+		}),
+		meta.WithLogger(c.log),
+		meta.WithEpochState(c.cfgNetmap.state),
+	}
+	if c.metricsCollector != nil {
+		mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
+	}
+
 	var sh shardOptsWithID
 	sh.configID = shCfg.id()
 	sh.shOpts = []shard.Option{
 		shard.WithLogger(c.log),
 		shard.WithRefillMetabase(shCfg.refillMetabase),
 		shard.WithMode(shCfg.mode),
-		shard.WithBlobStorOptions(
-			blobstor.WithCompressObjects(shCfg.compress),
-			blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
-			blobstor.WithStorages(ss),
-
-			blobstor.WithLogger(c.log),
-		),
-		shard.WithMetaBaseOptions(
-			meta.WithPath(shCfg.metaCfg.path),
-			meta.WithPermissions(shCfg.metaCfg.perm),
-			meta.WithMaxBatchSize(shCfg.metaCfg.maxBatchSize),
-			meta.WithMaxBatchDelay(shCfg.metaCfg.maxBatchDelay),
-			meta.WithBoltDBOptions(&bbolt.Options{
-				Timeout: 100 * time.Millisecond,
-			}),
-
-			meta.WithLogger(c.log),
-			meta.WithEpochState(c.cfgNetmap.state),
-		),
+		shard.WithBlobStorOptions(blobstoreOpts...),
+		shard.WithMetaBaseOptions(mbOptions...),
 		shard.WithPiloramaOptions(piloramaOpts...),
 		shard.WithWriteCache(shCfg.writecacheCfg.enabled),
 		shard.WithWriteCacheOptions(writeCacheOpts...),
@@ -842,18 +882,9 @@ func initLocalStorage(c *cfg) {
 	// service will be created later
 	c.cfgObject.getSvc = new(getsvc.Service)

-	var tssPrm tsourse.TombstoneSourcePrm
-	tssPrm.SetGetService(c.cfgObject.getSvc)
-	tombstoneSrc := tsourse.NewSource(tssPrm)
-
-	tombstoneSource := tombstone.NewChecker(
-		tombstone.WithLogger(c.log),
-		tombstone.WithTombstoneSource(tombstoneSrc),
-	)
-
 	var shardsAttached int
 	for _, optsWithMeta := range c.shardOpts() {
-		id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...)
+		id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...)
 		if err != nil {
 			c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
 		} else {
@@ -966,13 +997,6 @@ func (c *cfg) needBootstrap() bool {
 	return c.cfgNetmap.needBootstrap
 }

-// ObjectServiceLoad implements system loader interface for policer component.
-// It is calculated as size/capacity ratio of "remote object put" worker.
-// Returns float value between 0.0 and 1.0.
-func (c *cfg) ObjectServiceLoad() float64 {
-	return float64(c.cfgObject.pool.putRemote.Running()) / float64(c.cfgObject.pool.putRemoteCapacity)
-}
-
 type dCmp struct {
 	name       string
 	reloadFunc func() error
@@ -1031,6 +1055,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	}

 	components = append(components, dCmp{"logger", logPrm.Reload})
+	components = append(components, dCmp{"runtime", func() error {
+		setRuntimeParameters(c)
+		return nil
+	}})
 	components = append(components, dCmp{"tracing", func() error {
 		updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
 		if updated {
@@ -1054,7 +1082,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {

 	var rcfg engine.ReConfiguration
 	for _, optsWithID := range c.shardOpts() {
-		rcfg.AddShard(optsWithID.configID, optsWithID.shOpts)
+		rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
 	}

 	err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
@@ -1075,9 +1103,22 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }

+func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
+	var tssPrm tsourse.TombstoneSourcePrm
+	tssPrm.SetGetService(c.cfgObject.getSvc)
+	tombstoneSrc := tsourse.NewSource(tssPrm)
+
+	tombstoneSource := tombstone.NewChecker(
+		tombstone.WithLogger(c.log),
+		tombstone.WithTombstoneSource(tombstoneSrc),
+	)
+	return tombstoneSource
+}
+
 func (c *cfg) shutdown() {
 	c.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)

 	c.ctxCancel()
 	c.done <- struct{}{}
 	for i := range c.closers {
 		c.closers[len(c.closers)-1-i].fn()
@@ -83,3 +83,8 @@ func ShardPoolSize(c *config.Config) uint32 {
 func ShardErrorThreshold(c *config.Config) uint32 {
 	return config.Uint32Safe(c.Sub(subsection), "shard_ro_error_threshold")
 }
+
+// EngineLowMemoryConsumption returns the value of the "low_mem" config parameter from the "storage" section.
+func EngineLowMemoryConsumption(c *config.Config) bool {
+	return config.BoolSafe(c.Sub(subsection), "low_mem")
+}
@@ -5,8 +5,6 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/storage"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 )

 // Config is a wrapper over the config section

@@ -25,14 +23,11 @@ func (x *Config) Storages() []*storage.Config {
 		typ := config.String(
 			(*config.Config)(x),
 			strconv.Itoa(i)+".type")
-		switch typ {
-		case "":
+		if typ == "" {
 			return ss
-		case fstree.Type, blobovniczatree.Type:
-			sub := storage.From((*config.Config)(x).Sub(strconv.Itoa(i)))
-			ss = append(ss, sub)
-		default:
-			panic("invalid type")
 		}
+
+		sub := storage.From((*config.Config)(x).Sub(strconv.Itoa(i)))
+		ss = append(ss, sub)
 	}
 }
@@ -25,3 +25,9 @@ func HeadTimeout(c *config.Config) time.Duration {

 	return HeadTimeoutDefault
 }
+
+// UnsafeDisable returns the value of "unsafe_disable" config parameter
+// from "policer" section.
+func UnsafeDisable(c *config.Config) bool {
+	return config.BoolSafe(c.Sub(subsection), "unsafe_disable")
+}
@@ -51,3 +51,27 @@ func Address(c *config.Config) string {

 	return AddressDefault
 }
+
+// BlockRate returns the value of "block_rate" config parameter
+// from "pprof" section.
+func BlockRate(c *config.Config) int {
+	s := c.Sub(subsection)
+
+	v := int(config.IntSafe(s, "block_rate"))
+	if v <= 0 {
+		return 0
+	}
+	return v
+}
+
+// MutexRate returns the value of "mutex_rate" config parameter
+// from "pprof" section.
+func MutexRate(c *config.Config) int {
+	s := c.Sub(subsection)
+
+	v := int(config.IntSafe(s, "mutex_rate"))
+	if v <= 0 {
+		return 0
+	}
+	return v
+}
@@ -18,6 +18,9 @@ func TestProfilerSection(t *testing.T) {
 		require.Equal(t, profilerconfig.ShutdownTimeoutDefault, to)
 		require.Equal(t, profilerconfig.AddressDefault, addr)
 		require.False(t, profilerconfig.Enabled(configtest.EmptyConfig()))
+
+		require.Zero(t, profilerconfig.BlockRate(configtest.EmptyConfig()))
+		require.Zero(t, profilerconfig.MutexRate(configtest.EmptyConfig()))
 	})

 	const path = "../../../../config/example/node"

@@ -29,6 +32,9 @@ func TestProfilerSection(t *testing.T) {
 		require.Equal(t, 15*time.Second, to)
 		require.Equal(t, "localhost:6060", addr)
 		require.True(t, profilerconfig.Enabled(c))
+
+		require.Equal(t, 10_000, profilerconfig.BlockRate(c))
+		require.Equal(t, 10_000, profilerconfig.MutexRate(c))
 	}

 	configtest.ForEachFileType(path, fileConfigTest)
cmd/frostfs-node/config/runtime/config.go (new file, 23 lines)

@@ -0,0 +1,23 @@
package runtime

import (
	"math"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)

const (
	subsection         = "runtime"
	memoryLimitDefault = math.MaxInt64
)

// GCMemoryLimitBytes returns the value of "soft_memory_limit" config parameter from "runtime" section.
func GCMemoryLimitBytes(c *config.Config) int64 {
	l := config.SizeInBytesSafe(c.Sub(subsection), "soft_memory_limit")

	if l > 0 {
		return int64(l)
	}

	return memoryLimitDefault
}
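A soft memory limit like this is presumably applied through the Go runtime's knob added in Go 1.19, debug.SetMemoryLimit; the setRuntimeParameters helper registered in the reload path above is the likely call site, though it is not shown in this diff. A standalone sketch with an illustrative 1 GiB limit:

package main

import (
	"fmt"
	"math"
	"runtime/debug"
)

func main() {
	// 1 GiB soft limit; math.MaxInt64 effectively means "no limit",
	// matching memoryLimitDefault in the config package above.
	const limit = int64(1 << 30)

	prev := debug.SetMemoryLimit(limit) // returns the previous limit
	fmt.Println("previous:", prev, "now:", limit, "max:", int64(math.MaxInt64))
}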
|
30
cmd/frostfs-node/config/runtime/config_test.go
Normal file
30
cmd/frostfs-node/config/runtime/config_test.go
Normal file
|
@ -0,0 +1,30 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGCMemoryLimit(t *testing.T) {
|
||||
t.Run("defaults", func(t *testing.T) {
|
||||
empty := configtest.EmptyConfig()
|
||||
|
||||
require.Equal(t, int64(math.MaxInt64), GCMemoryLimitBytes(empty))
|
||||
})
|
||||
|
||||
const path = "../../../../config/example/node"
|
||||
|
||||
fileConfigTest := func(c *config.Config) {
|
||||
require.Equal(t, int64(1073741824), GCMemoryLimitBytes(c))
|
||||
}
|
||||
|
||||
configtest.ForEachFileType(path, fileConfigTest)
|
||||
|
||||
t.Run("ENV", func(t *testing.T) {
|
||||
configtest.ForEnvFileType(t, path, fileConfigTest)
|
||||
})
|
||||
}
|
|
@@ -1,9 +1,9 @@
 package tracing

 import (
-	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
+	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 )

 const (
Some files were not shown because too many files have changed in this diff.