fix/355-increase-tree-service-client-cache-size #359
584 changed files with 10789 additions and 21571 deletions
.gitattributes | 1 (vendored)
@@ -1,2 +1,3 @@
/**/*.pb.go -diff -merge
/**/*.pb.go linguist-generated=true
/go.sum -diff

@@ -31,7 +31,7 @@ repos:
      - id: shellcheck

  - repo: https://github.com/golangci/golangci-lint
    rev: v1.51.2
    rev: v1.52.2
    hooks:
      - id: golangci-lint

@@ -48,3 +48,4 @@ repos:
    rev: v1.0.0-rc.1
    hooks:
      - id: go-staticcheck-repo-mod
      - id: go-mod-tidy

CHANGELOG.md | 28
@@ -4,10 +4,32 @@ Changelog for FrostFS Node
## [Unreleased]

### Added
- Support impersonate bearer token (#229)
- Change log level on SIGHUP for ir (#125)
- Reload pprof and metrics on SIGHUP for ir (#125)
- Support copies number parameter in `frostfs-cli object put` (#351)
- Set extra wallets on SIGHUP for ir (#125)

### Changed
- `frostfs-cli util locode generate` is now much faster (#309)

### Fixed
- Take network settings into account during netmap contract update (#100)
- Read config files from dir even if config file not provided via `--config` for node (#238)
- Notary requests parsing according to `neo-go`'s updates (#268)
- Tree service panic in its internal client cache (#322)
- Iterate over endpoints when create ws client in morph's constructor (#304)
- Delete complex objects with GC (#332)

### Removed
### Updated
- `paulmach/orb` to v0.9.1
- `github.com/nats-io/nats.go` to `v1.25.0`
- `golang.org/x/sync` to `v0.2.0`
- `golang.org/x/term` to `v0.8.0`
- `github.com/spf13/cobra` to `v1.7.0`
- `github.com/multiformats/go-multiaddr` to `v0.9.0`
- `go.uber.org/atomic` to `v1.11.0`

### Updating from v0.36.0

## [v0.36.0] - 2023-04-12 - Furtwängler

@@ -25,6 +47,7 @@ Changelog for FrostFS Node
- Parameters `nns-name` and `nns-zone` for command `frostfs-cli container create` (#37)
- Tree service now saves the last synchronization height which persists across restarts (#82)
- Add tracing support (#135)
- Multiple (and a fix for single) copies number support for `PUT` requests (#221)

### Changed
- Change `frostfs_node_engine_container_size` to counting sizes of logical objects

@@ -64,6 +87,9 @@ Changelog for FrostFS Node
- Iterating over just removed files by FSTree (#98)
- Parts of a locked object could not be removed anymore (#141)
- Non-alphabet nodes do not try to handle alphabet events (#181)
- Failing SN and IR transactions because of incorrect scopes (#2230, #2263)
- Global scope used for some transactions (#2230, #2263)
- Concurrent morph cache misses (#30)

### Removed
### Updated

@@ -84,7 +110,7 @@ You need to change configuration environment variables to `FROSTFS_*` if you use
New config field `object.delete.tombstone_lifetime` allows to set tombstone lifetime
more appropriate for a specific deployment.

Use `__SYSTEM__` prefix for system attributes instead of `__NEOFS__`
(existed objects with old attributes will be treated as before, but for new objects new attributes will be used).

## Older versions

Makefile | 2
@@ -8,7 +8,7 @@ HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

GO_VERSION ?= 1.19
LINT_VERSION ?= 1.50.0
LINT_VERSION ?= 1.52.2
ARCH = amd64

BIN = bin

@@ -147,7 +147,6 @@ NNS: Set container.frostfs -> cae60bdd689d185901e495352d0247752ce50846
NNS: Set frostfsid.frostfs -> c421fb60a3895865a8f24d197d6a80ef686041d2
NNS: Set netmap.frostfs -> 894eb854632f50fb124412ce7951ebc00763525e
NNS: Set proxy.frostfs -> ac6e6fe4b373d0ca0ca4969d1e58fa0988724e7d
NNS: Set reputation.frostfs -> 6eda57c9d93d990573646762d1fea327ce41191f
Waiting for transactions to persist...
```

@@ -1,39 +0,0 @@
# FrostFS subnetwork creation

This is a short guide on how to create FrostFS subnetworks. This guide
considers that the sidechain and the inner ring (alphabet nodes) have already been
deployed and the sidechain contains a deployed `subnet` contract.

## Prerequisites

To follow this guide, you need:
- neo-go sidechain RPC endpoint;
- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases);
- wallet with FrostFS account.

## Creation

```shell
$ frostfs-adm morph subnet create \
    -r <side_chain_RPC_endpoint> \
    -w </path/to/owner/wallet> \
    --notary
Create subnet request sent successfully. ID: 4223489767.
```

**NOTE:** in notary-enabled environment you should have a sufficient
notary deposit (not expired, with enough GAS balance). Your subnet ID
will differ from the example.

The default account in the wallet that has been passed with `-w` flag is the owner
of the just created subnetwork.

You can check if your subnetwork was created successfully:

```shell
$ frostfs-adm morph subnet get \
    -r <side_chain_RPC_endpoint> \
    --subnet <subnet_ID>
Owner: NUc734PMJXiqa2J9jRtvskU3kCdyyuSN8Q
```
Your owner will differ from the example.

@@ -1,137 +0,0 @@
# Managing Subnetworks

This is a short guide on how to manage FrostFS subnetworks. This guide
considers that the sidechain and the inner ring (alphabet nodes) have already been
deployed, and the sidechain contains a deployed `subnet` contract.

## Prerequisites

- neo-go sidechain RPC endpoint;
- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases);
- [created](subnetwork-creation.md) subnetwork;
- wallet with the account that owns the subnetwork;
- public key of the Storage Node;
- public keys of the node and client administrators;
- owner IDs of the FrostFS users.

## Add node administrator

Node administrators are accounts that can manage (add and delete nodes)
the whitelist of the nodes which can be included to a subnetwork. Only the subnet
owner is allowed to add and remove node administrators from the subnetwork.

```shell
$ frostfs-adm morph subnet admin add \
    -r <side_chain_RPC_endpoint> \
    -w </path/to/owner/wallet> \
    --admin <HEX_admin_public_key> \
    --subnet <subnet_ID>
Add admin request sent successfully.
```

## Add node

Adding a node to a subnetwork means that the node becomes able to service
containers that have been created in that subnetwork. Addition only changes
the list of the allowed nodes. Node is not required to be bootstrapped at the
moment of its inclusion.

```shell
$ frostfs-adm morph subnet node add \
    -r <side_chain_RPC_endpoint> \
    -w </path/to/node_admin/wallet> \
    --node <HEX_node_public_key> \
    --subnet <subnet_ID>
Add node request sent successfully.
```

**NOTE:** the owner of the subnetwork is also allowed to add nodes.

## Add client administrator

Client administrators are accounts that can manage (add and delete
nodes) the whitelist of the clients that can create containers in the
subnetwork. Only the subnet owner is allowed to add and remove client
administrators from the subnetwork.

```shell
$ frostfs-adm morph subnet admin add \
    -r <side_chain_RPC_endpoint> \
    -w </path/to/owner/wallet> \
    --admin <HEX_admin_public_key> \
    --subnet <subnet_ID> \
    --client \
    --group <group_ID>
Add admin request sent successfully.
```

**NOTE:** you do not need to create a group explicitly, it will be created
right after the first client admin is added. Group ID is a 4-byte
positive integer number.

## Add client

```shell
$ frostfs-adm morph subnet client add \
    -r <side_chain_RPC_endpoint> \
    -w </path/to/client_admin/wallet> \
    --client <client_ownerID> \
    --subnet <subnet_ID> \
    --group <group_ID>
Add client request sent successfully.
```

**NOTE:** the owner of the subnetwork is also allowed to add clients. This is
the only one command that accepts `ownerID`, not the public key.
Administrator can manage only their group (a group where that administrator
has been added by the subnet owner).

# Bootstrapping Storage Node

After a subnetwork [is created](subnetwork-creation.md) and a node is included into it, the
node could be bootstrapped and service subnetwork containers.

For bootstrapping, you need to specify the ID of the subnetwork in the node's
configuration:

```yaml
...
node:
  ...
  subnet:
    entries: # list of IDs of subnets to enter in a text format of FrostFS API protocol (overrides corresponding attributes)
      - <subnetwork_ID>
  ...
...
```

**NOTE:** specifying subnetwork that is denied for the node is not an error:
that configuration value would be ignored. You do not need to specify zero
(with 0 ID) subnetwork: its inclusion is implicit. On the contrary, to exclude
a node from the default zero subnetwork, you need to specify it explicitly:

```yaml
...
node:
  ...
  subnet:
    exit_zero: true # toggle entrance to zero subnet (overrides corresponding attribute and occurrence in `entries`)
  ...
...
```

# Creating container in non-zero subnetwork

Creating containers without using `--subnet` flag is equivalent to
creating container in the zero subnetwork.

To create a container in a private network, your wallet must be added to
the client whitelist by the client admins or the subnet owners:

```shell
$ frostfs-cli container create \
    --policy 'REP 1' \
    -w </path/to/wallet> \
    -r s01.frostfs.devenv:8080 \
    --subnet <subnet_ID>
```

@@ -47,7 +47,7 @@ credentials:
  {{.}}: password{{end}}
`

func initConfig(cmd *cobra.Command, args []string) error {
func initConfig(cmd *cobra.Command, _ []string) error {
	configPath, err := readConfigPathFromArgs(cmd)
	if err != nil {
		return nil

@@ -10,12 +10,12 @@ import (
	"strings"
	"text/tabwriter"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

@@ -48,41 +48,25 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
	buf := bytes.NewBuffer(nil)
	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)

	for _, param := range arr {
		tuple, ok := param.Value().([]stackitem.Item)
		if !ok || len(tuple) != 2 {
			return errors.New("invalid ListConfig response from netmap contract")
		}

		k, err := tuple[0].TryBytes()
		if err != nil {
			return errors.New("invalid config key from netmap contract")
		}

		v, err := tuple[1].TryBytes()
		if err != nil {
			return invalidConfigValueErr(k)
		}

		switch string(k) {
		case netmapAuditFeeKey, netmapBasicIncomeRateKey,
			netmapContainerFeeKey, netmapContainerAliasFeeKey,
			netmapEigenTrustIterationsKey,
			netmapEpochKey, netmapInnerRingCandidateFeeKey,
			netmapMaxObjectSizeKey, netmapWithdrawFeeKey:
	m, err := parseConfigFromNetmapContract(arr)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
			netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
			netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
			netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
			nbuf := make([]byte, 8)
			copy(nbuf[:], v)
			n := binary.LittleEndian.Uint64(nbuf)
			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
		case netmapEigenTrustAlphaKey:
			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (str)\n", k, v)))
		case netmapHomomorphicHashDisabledKey, netmapMaintenanceAllowedKey:
			vBool, err := tuple[1].TryBool()
			if err != nil {
		case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
			if len(v) == 0 || len(v) > 1 {
				return invalidConfigValueErr(k)
			}

			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, vBool)))
			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
		default:
			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
		}

@@ -150,25 +134,15 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
	valRaw := v

	switch key {
	case netmapAuditFeeKey, netmapBasicIncomeRateKey,
		netmapContainerFeeKey, netmapContainerAliasFeeKey,
		netmapEigenTrustIterationsKey,
		netmapEpochKey, netmapInnerRingCandidateFeeKey,
		netmapMaxObjectSizeKey, netmapWithdrawFeeKey:
	case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
		netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
		netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
		netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
		val, err = strconv.ParseInt(valRaw, 10, 64)
		if err != nil {
			err = fmt.Errorf("could not parse %s's value '%s' as int: %w", key, valRaw, err)
		}
	case netmapEigenTrustAlphaKey:
		// just check that it could
		// be parsed correctly
		_, err = strconv.ParseFloat(v, 64)
		if err != nil {
			err = fmt.Errorf("could not parse %s's value '%s' as float: %w", key, valRaw, err)
		}

		val = valRaw
	case netmapHomomorphicHashDisabledKey, netmapMaintenanceAllowedKey:
	case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
		val, err = strconv.ParseBool(valRaw)
		if err != nil {
			err = fmt.Errorf("could not parse %s's value '%s' as bool: %w", key, valRaw, err)

@@ -187,6 +161,6 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
	return
}

func invalidConfigValueErr(key []byte) error {
func invalidConfigValueErr(key string) error {
	return fmt.Errorf("invalid %s config value from netmap contract", key)
}
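
The hunks above switch `dumpNetworkConfig` to the shared `parseConfigFromNetmapContract` helper and decode each setting by key: numeric settings as up-to-8-byte little-endian integers, the boolean settings from a single byte, everything else printed as hex. A minimal standard-library sketch of that decoding rule, with an illustrative (not exhaustive) key list:

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// decodeConfigValue mirrors the decoding rule used above: integer settings are
// stored as up-to-8-byte little-endian values, booleans as a single 0/1 byte,
// anything unknown is shown as hex. The key names are illustrative only.
func decodeConfigValue(key string, v []byte) string {
	switch key {
	case "EpochDuration", "MaxObjectSize", "WithdrawFee": // a subset of the numeric settings
		nbuf := make([]byte, 8)
		copy(nbuf, v)
		return fmt.Sprintf("%s: %d (int)", key, binary.LittleEndian.Uint64(nbuf))
	case "HomomorphicHashingDisabled", "MaintenanceModeAllowed": // boolean settings
		if len(v) != 1 {
			return fmt.Sprintf("%s: invalid value", key)
		}
		return fmt.Sprintf("%s: %t (bool)", key, v[0] == 1)
	default:
		return fmt.Sprintf("%s: %s (hex)", key, hex.EncodeToString(v))
	}
}

func main() {
	fmt.Println(decodeConfigValue("EpochDuration", []byte{240, 0}))         // 240
	fmt.Println(decodeConfigValue("HomomorphicHashingDisabled", []byte{1})) // true
	fmt.Println(decodeConfigValue("SomethingElse", []byte{0xca, 0xfe}))     // cafe
}
```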

@@ -1,40 +0,0 @@
package morph

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/google/go-github/v39/github"
	"github.com/spf13/cobra"
)

func downloadContractsFromGithub(cmd *cobra.Command) (io.ReadCloser, error) {
	gcl := github.NewClient(nil)
	release, _, err := gcl.Repositories.GetLatestRelease(context.Background(), "nspcc-dev", "frostfs-contract")
	if err != nil {
		return nil, fmt.Errorf("can't fetch release info: %w", err)
	}

	cmd.Printf("Found %s (%s), downloading...\n", release.GetTagName(), release.GetName())

	var url string
	for _, a := range release.Assets {
		if strings.HasPrefix(a.GetName(), "frostfs-contract") {
			url = a.GetBrowserDownloadURL()
			break
		}
	}
	if url == "" {
		return nil, errors.New("can't find contracts archive in release assets")
	}

	resp, err := http.Get(url)
	if err != nil {
		return nil, fmt.Errorf("can't fetch contracts archive: %w", err)
	}
	return resp.Body, nil
}
|||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func forceNewEpochCmd(cmd *cobra.Command, args []string) error {
|
||||
func forceNewEpochCmd(cmd *cobra.Command, _ []string) error {
|
||||
wCtx, err := newInitializeContext(cmd, viper.GetViper())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't to initialize context: %w", err)
|
||||
|
|
|

@@ -21,6 +21,7 @@ import (
	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"golang.org/x/sync/errgroup"
)

const (

@@ -29,7 +30,7 @@ const (
	consensusAccountName = "consensus"
)

func generateAlphabetCreds(cmd *cobra.Command, args []string) error {
func generateAlphabetCreds(cmd *cobra.Command, _ []string) error {
	// alphabet size is not part of the config
	size, err := cmd.Flags().GetUint(alphabetSizeFlag)
	if err != nil {

@@ -92,28 +93,32 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
		pubs[i] = w.Accounts[0].PrivateKey().PublicKey()
	}

	var errG errgroup.Group

	// Create committee account with N/2+1 multi-signature.
	majCount := smartcontract.GetMajorityHonestNodeCount(size)
	for i, w := range wallets {
		if err := addMultisigAccount(w, majCount, committeeAccountName, passwords[i], pubs); err != nil {
			return nil, fmt.Errorf("can't create committee account: %w", err)
		}
	}

	// Create consensus account with 2*N/3+1 multi-signature.
	bftCount := smartcontract.GetDefaultHonestNodeCount(size)
	for i, w := range wallets {
		if err := addMultisigAccount(w, bftCount, consensusAccountName, passwords[i], pubs); err != nil {
			return nil, fmt.Errorf("can't create consensus account: %w", err)
		}
	for i := range wallets {
		i := i
		ps := make(keys.PublicKeys, len(pubs))
		copy(ps, pubs)
		errG.Go(func() error {
			if err := addMultisigAccount(wallets[i], majCount, committeeAccountName, passwords[i], ps); err != nil {
				return fmt.Errorf("can't create committee account: %w", err)
			}
			if err := addMultisigAccount(wallets[i], bftCount, consensusAccountName, passwords[i], ps); err != nil {
				return fmt.Errorf("can't create consentus account: %w", err)
			}
			if err := wallets[i].SavePretty(); err != nil {
				return fmt.Errorf("can't save wallet: %w", err)
			}
			return nil
		})
	}

	for _, w := range wallets {
		if err := w.SavePretty(); err != nil {
			return nil, fmt.Errorf("can't save wallet: %w", err)
		}
	if err := errG.Wait(); err != nil {
		return nil, err
	}

	return passwords, nil
}
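
The change above moves committee and consensus multisig account creation into an `errgroup.Group`, so every wallet is processed concurrently and the first error aborts the whole batch. A small sketch of the same pattern under assumed placeholder names (the real work happens in `addMultisigAccount` and `SavePretty`):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	wallets := []string{"az", "buky", "vedi"} // stand-ins for the per-letter alphabet wallets

	var g errgroup.Group
	results := make([]string, len(wallets))

	for i := range wallets {
		i := i // capture the loop variable (pre-Go 1.22 idiom, as in the diff)
		g.Go(func() error {
			// Stand-in for addMultisigAccount + SavePretty; returning an error
			// here would surface as the result of g.Wait().
			results[i] = "processed " + wallets[i]
			return nil
		})
	}

	if err := g.Wait(); err != nil { // first non-nil error, if any
		fmt.Println("failed:", err)
		return
	}
	fmt.Println(results)
}
```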

@@ -7,6 +7,7 @@ import (
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"

@@ -71,24 +72,31 @@ func TestGenerateAlphabet(t *testing.T) {
	buf.WriteString(testContractPassword + "\r")
	require.NoError(t, generateAlphabetCreds(generateAlphabetCmd, nil))

	var wg sync.WaitGroup
	for i := uint64(0); i < size; i++ {
		p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
		w, err := wallet.NewWalletFromFile(p)
		require.NoError(t, err, "wallet doesn't exist")
		require.Equal(t, 3, len(w.Accounts), "not all accounts were created")
		for _, a := range w.Accounts {
			err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams())
			require.NoError(t, err, "can't decrypt account")
			switch a.Label {
			case consensusAccountName:
				require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters))
			case committeeAccountName:
				require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters))
			default:
				require.Equal(t, singleAccountName, a.Label)
		i := i
		go func() {
			defer wg.Done()
			p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
			w, err := wallet.NewWalletFromFile(p)
			require.NoError(t, err, "wallet doesn't exist")
			require.Equal(t, 3, len(w.Accounts), "not all accounts were created")

			for _, a := range w.Accounts {
				err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams())
				require.NoError(t, err, "can't decrypt account")
				switch a.Label {
				case consensusAccountName:
					require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters))
				case committeeAccountName:
					require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters))
				default:
					require.Equal(t, singleAccountName, a.Label)
				}
			}
		}()
	}
	wg.Wait()

	t.Run("check contract group wallet", func(t *testing.T) {
		p := filepath.Join(walletDir, contractWalletFilename)

@@ -45,7 +45,7 @@ type initializeContext struct {
	ContractPath string
}

func initializeSideChainCmd(cmd *cobra.Command, args []string) error {
func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
	initCtx, err := newInitializeContext(cmd, viper.GetViper())
	if err != nil {
		return fmt.Errorf("initialization error: %w", err)

@@ -91,11 +91,7 @@ func initializeSideChainCmd(cmd *cobra.Command, args []string) error {
	}

	cmd.Println("Stage 7: set addresses in NNS.")
	if err := initCtx.setNNS(); err != nil {
		return err
	}

	return nil
	return initCtx.setNNS()
}

func (c *initializeContext) close() {

@@ -142,7 +138,7 @@ func newInitializeContext(cmd *cobra.Command, v *viper.Viper) (*initializeContex
		return nil, err
	}

	ctrPath, err := getContractsPath(cmd, v, needContracts)
	ctrPath, err := getContractsPath(cmd, needContracts)
	if err != nil {
		return nil, err
	}

@@ -222,7 +218,7 @@ func getWallet(cmd *cobra.Command, v *viper.Viper, needContracts bool, walletDir
	return openContractWallet(v, cmd, walletDir)
}

func getContractsPath(cmd *cobra.Command, v *viper.Viper, needContracts bool) (string, error) {
func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
	if !needContracts {
		return "", nil
	}

@@ -16,6 +16,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
	morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/encoding/address"

@@ -23,6 +24,7 @@ import (
	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"

@@ -30,8 +32,8 @@ import (
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
	"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
	"github.com/spf13/viper"
)

const (

@@ -45,26 +47,6 @@ const (
	frostfsIDContract = "frostfsid"
	netmapContract = "netmap"
	proxyContract = "proxy"
	reputationContract = "reputation"
	subnetContract = "subnet"
)

const (
	netmapEpochKey = "EpochDuration"
	netmapMaxObjectSizeKey = "MaxObjectSize"
	netmapAuditFeeKey = "AuditFee"
	netmapContainerFeeKey = "ContainerFee"
	netmapContainerAliasFeeKey = "ContainerAliasFee"
	netmapEigenTrustIterationsKey = "EigenTrustIterations"
	netmapEigenTrustAlphaKey = "EigenTrustAlpha"
	netmapBasicIncomeRateKey = "BasicIncomeRate"
	netmapInnerRingCandidateFeeKey = "InnerRingCandidateFee"
	netmapWithdrawFeeKey = "WithdrawFee"
	netmapHomomorphicHashDisabledKey = "HomomorphicHashingDisabled"
	netmapMaintenanceAllowedKey = "MaintenanceModeAllowed"

	defaultEigenTrustIterations = 4
	defaultEigenTrustAlpha = "0.1"
)

var (

@@ -75,8 +57,6 @@ var (
		frostfsIDContract,
		netmapContract,
		proxyContract,
		reputationContract,
		subnetContract,
	}

	fullContractList = append([]string{

@@ -85,6 +65,19 @@ var (
		nnsContract,
		alphabetContract,
	}, contractList...)

	netmapConfigKeys = []string{
		netmap.EpochDurationConfig,
		netmap.MaxObjectSizeConfig,
		netmap.AuditFeeConfig,
		netmap.ContainerFeeConfig,
		netmap.ContainerAliasFeeConfig,
		netmap.BasicIncomeRateConfig,
		netmap.IrCandidateFeeConfig,
		netmap.WithdrawFeeConfig,
		netmap.HomomorphicHashingDisabledKey,
		netmap.MaintenanceModeAllowedConfig,
	}
)

type contractState struct {

@@ -239,7 +232,7 @@ func (c *initializeContext) deployOrUpdateContracts(w *io2.BufBinWriter, nnsHash
		invokeHash = ctrHash
	}

	params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam))
	params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, updateMethodName))
	res, err := c.CommitteeAct.MakeCall(invokeHash, method, params...)
	if err != nil {
		if method != updateMethodName || !strings.Contains(err.Error(), common.ErrAlreadyUpdated) {

@@ -362,7 +355,7 @@ func (c *initializeContext) deployContracts() error {
			return fmt.Errorf("can't sign manifest group: %v", err)
		}

		params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam))
		params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, deployMethodName))
		res, err := c.CommitteeAct.MakeCall(management.Hash, deployMethodName, params...)
		if err != nil {
			return fmt.Errorf("can't deploy %s contract: %w", ctrName, err)

@@ -408,11 +401,9 @@ func (c *initializeContext) readContracts(names []string) error {
	} else {
		var r io.ReadCloser
		if c.ContractPath == "" {
			c.Command.Println("Contracts flag is missing, latest release will be fetched from Github.")
			r, err = downloadContractsFromGithub(c.Command)
		} else {
			r, err = os.Open(c.ContractPath)
			return errors.New("contracts flag is missing")
		}
		r, err = os.Open(c.ContractPath)
		if err != nil {
			return fmt.Errorf("can't open contracts archive: %w", err)
		}

@@ -529,7 +520,7 @@ func getContractDeployParameters(cs *contractState, deployData []any) []any {
	return []any{cs.RawNEF, cs.RawManifest, deployData}
}

func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any) []any {
func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any, method string) []any {
	items := make([]any, 1, 6)
	items[0] = false // notaryDisabled is false

@@ -566,20 +557,31 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
			c.Contracts[netmapContract].Hash,
			c.Contracts[containerContract].Hash)
	case netmapContract:
		configParam := []any{
			netmapEpochKey, viper.GetInt64(epochDurationInitFlag),
			netmapMaxObjectSizeKey, viper.GetInt64(maxObjectSizeInitFlag),
			netmapAuditFeeKey, viper.GetInt64(auditFeeInitFlag),
			netmapContainerFeeKey, viper.GetInt64(containerFeeInitFlag),
			netmapContainerAliasFeeKey, viper.GetInt64(containerAliasFeeInitFlag),
			netmapEigenTrustIterationsKey, int64(defaultEigenTrustIterations),
			netmapEigenTrustAlphaKey, defaultEigenTrustAlpha,
			netmapBasicIncomeRateKey, viper.GetInt64(incomeRateInitFlag),
			netmapInnerRingCandidateFeeKey, viper.GetInt64(candidateFeeInitFlag),
			netmapWithdrawFeeKey, viper.GetInt64(withdrawFeeInitFlag),
			netmapHomomorphicHashDisabledKey, viper.GetBool(homomorphicHashDisabledInitFlag),
			netmapMaintenanceAllowedKey, viper.GetBool(maintenanceModeAllowedInitFlag),
		md := getDefaultNetmapContractConfigMap()
		if method == updateMethodName {
			arr, err := c.getNetConfigFromNetmapContract()
			if err != nil {
				panic(err)
			}
			m, err := parseConfigFromNetmapContract(arr)
			if err != nil {
				panic(err)
			}
			for k, v := range m {
				for _, key := range netmapConfigKeys {
					if k == key {
						md[k] = v
						break
					}
				}
			}
		}

		var configParam []any
		for k, v := range md {
			configParam = append(configParam, k, v)
		}

		items = append(items,
			c.Contracts[balanceContract].Hash,
			c.Contracts[containerContract].Hash,

@@ -587,14 +589,28 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
			configParam)
	case proxyContract:
		items = nil
	case reputationContract:
	case subnetContract:
	default:
		panic(fmt.Sprintf("invalid contract name: %s", ctrName))
	}
	return items
}

func (c *initializeContext) getNetConfigFromNetmapContract() ([]stackitem.Item, error) {
	cs, err := c.Client.GetContractStateByID(1)
	if err != nil {
		return nil, fmt.Errorf("NNS is not yet deployed: %w", err)
	}
	nmHash, err := nnsResolveHash(c.ReadOnlyInvoker, cs.Hash, netmapContract+".frostfs")
	if err != nil {
		return nil, fmt.Errorf("can't get netmap contract hash: %w", err)
	}
	arr, err := unwrap.Array(c.ReadOnlyInvoker.Call(nmHash, "listConfig"))
	if err != nil {
		return nil, fmt.Errorf("can't fetch list of network config keys from the netmap contract")
	}
	return arr, err
}

func (c *initializeContext) getAlphabetDeployItems(i, n int) []any {
	items := make([]any, 6)
	items[0] = false

@@ -15,6 +15,7 @@ import (
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	nnsClient "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"

@@ -284,10 +285,11 @@ func parseNNSResolveResult(res stackitem.Item) (util.Uint160, error) {
}

func nnsIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
	switch ct := c.(type) {
	switch c.(type) {
	case *rpcclient.Client:
		//lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
		return ct.NNSIsAvailable(nnsHash, name)
		inv := invoker.New(c, nil)
		reader := nnsClient.NewReader(inv, nnsHash)
		return reader.IsAvailable(name)
	default:
		b, err := unwrap.Bool(invokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
		if err != nil {

@@ -9,6 +9,7 @@ import (
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"

@@ -18,33 +19,24 @@ import (
)

// initialAlphabetNEOAmount represents the total amount of GAS distributed between alphabet nodes.
const initialAlphabetNEOAmount = native.NEOTotalSupply

func (c *initializeContext) registerCandidates() error {
	neoHash := neo.Hash

	cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neoHash, "getCandidates"))
	if err != nil {
		return fmt.Errorf("`getCandidates`: %w", err)
	}

	if len(cc) > 0 {
		c.Command.Println("Candidates are already registered.")
		return nil
	}
const (
	initialAlphabetNEOAmount = native.NEOTotalSupply
	registerBatchSize = transaction.MaxAttributes - 1
)

func (c *initializeContext) registerCandidateRange(start, end int) error {
	regPrice, err := c.getCandidateRegisterPrice()
	if err != nil {
		return fmt.Errorf("can't fetch registration price: %w", err)
	}

	w := io.NewBufBinWriter()
	emit.AppCall(w.BinWriter, neoHash, "setRegisterPrice", callflag.States, 1)
	for _, acc := range c.Accounts {
		emit.AppCall(w.BinWriter, neoHash, "registerCandidate", callflag.States, acc.PrivateKey().PublicKey().Bytes())
	emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, 1)
	for _, acc := range c.Accounts[start:end] {
		emit.AppCall(w.BinWriter, neo.Hash, "registerCandidate", callflag.States, acc.PrivateKey().PublicKey().Bytes())
		emit.Opcodes(w.BinWriter, opcode.ASSERT)
	}
	emit.AppCall(w.BinWriter, neoHash, "setRegisterPrice", callflag.States, regPrice)
	emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
	if w.Err != nil {
		panic(fmt.Sprintf("BUG: %v", w.Err))
	}

@@ -53,14 +45,14 @@ func (c *initializeContext) registerCandidates() error {
		Signer: c.getSigner(false, c.CommitteeAcc),
		Account: c.CommitteeAcc,
	}}
	for i := range c.Accounts {
	for _, acc := range c.Accounts[start:end] {
		signers = append(signers, rpcclient.SignerAccount{
			Signer: transaction.Signer{
				Account: c.Accounts[i].Contract.ScriptHash(),
				Account: acc.Contract.ScriptHash(),
				Scopes: transaction.CustomContracts,
				AllowedContracts: []util.Uint160{neoHash},
				AllowedContracts: []util.Uint160{neo.Hash},
			},
			Account: c.Accounts[i],
			Account: acc,
		})
	}

@@ -73,8 +65,8 @@ func (c *initializeContext) registerCandidates() error {
	}

	network := c.CommitteeAct.GetNetwork()
	for i := range c.Accounts {
		if err := c.Accounts[i].SignTx(network, tx); err != nil {
	for _, acc := range c.Accounts[start:end] {
		if err := acc.SignTx(network, tx); err != nil {
			return fmt.Errorf("can't sign a transaction: %w", err)
		}
	}

@@ -82,6 +74,39 @@ func (c *initializeContext) registerCandidates() error {
	return c.sendTx(tx, c.Command, true)
}

func (c *initializeContext) registerCandidates() error {
	cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neo.Hash, "getCandidates"))
	if err != nil {
		return fmt.Errorf("`getCandidates`: %w", err)
	}

	need := len(c.Accounts)
	have := len(cc)

	if need == have {
		c.Command.Println("Candidates are already registered.")
		return nil
	}

	// Register candidates in batches in order to overcome the signers amount limit.
	// See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27
	for i := 0; i < need; i += registerBatchSize {
		start, end := i, i+registerBatchSize
		if end > need {
			end = need
		}
		// This check is sound because transactions are accepted/rejected atomically.
		if have >= end {
			continue
		}
		if err := c.registerCandidateRange(start, end); err != nil {
			return fmt.Errorf("registering candidates %d..%d: %q", start, end-1, err)
		}
	}

	return nil
}

func (c *initializeContext) transferNEOToAlphabetContracts() error {
	neoHash := neo.Hash

@@ -116,10 +141,11 @@ func (c *initializeContext) transferNEOFinished(neoHash util.Uint160) (bool, err
var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")

func (c *initializeContext) getCandidateRegisterPrice() (int64, error) {
	switch ct := c.Client.(type) {
	switch c.Client.(type) {
	case *rpcclient.Client:
		//lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
		return ct.GetCandidateRegisterPrice()
		inv := invoker.New(c.Client, nil)
		reader := neo.NewReader(inv)
		return reader.GetRegisterPrice()
	default:
		neoHash := neo.Hash
		res, err := invokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
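
The new `registerCandidates` above splits registration into ranges of `registerBatchSize` accounts so that a single transaction never exceeds the signer limit, and it skips ranges that previous runs already registered. A standalone sketch of that range arithmetic (the batch size 15 is only an example value here):

```go
package main

import "fmt"

// batches reproduces the range arithmetic used by registerCandidates above:
// walk indices [0, need) in chunks of size batch, skipping chunks that are
// already fully covered by `have` previously registered candidates.
func batches(need, have, batch int) [][2]int {
	var out [][2]int
	for i := 0; i < need; i += batch {
		start, end := i, i+batch
		if end > need {
			end = need
		}
		if have >= end { // whole chunk already registered
			continue
		}
		out = append(out, [2]int{start, end})
	}
	return out
}

func main() {
	// E.g. 22 alphabet accounts, 7 already registered, batch size 15.
	fmt.Println(batches(22, 7, 15)) // [[0 15] [15 22]]
}
```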

@@ -6,6 +6,7 @@ import (
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
	"github.com/nspcc-dev/neo-go/pkg/config"

@@ -36,6 +37,12 @@ func TestInitialize(t *testing.T) {
	t.Run("7 nodes", func(t *testing.T) {
		testInitialize(t, 7)
	})
	t.Run("16 nodes", func(t *testing.T) {
		testInitialize(t, 16)
	})
	t.Run("22 nodes", func(t *testing.T) {
		testInitialize(t, 22)
	})
}

func testInitialize(t *testing.T, committeeSize int) {

@@ -101,11 +108,10 @@ func generateTestData(t *testing.T, dir string, size int) {
	cfg := config.Config{}
	cfg.ProtocolConfiguration.Magic = 12345
	cfg.ProtocolConfiguration.ValidatorsCount = size
	cfg.ProtocolConfiguration.SecondsPerBlock = 1 //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
	cfg.ProtocolConfiguration.TimePerBlock = time.Second
	cfg.ProtocolConfiguration.StandbyCommittee = pubs // sorted by glagolic letters
	cfg.ProtocolConfiguration.P2PSigExtensions = true
	cfg.ProtocolConfiguration.VerifyTransactions = true
	cfg.ProtocolConfiguration.VerifyBlocks = true //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
	data, err := yaml.Marshal(cfg)
	require.NoError(t, err)

@@ -1,65 +0,0 @@
package internal

import (
	"fmt"
	"strconv"

	"google.golang.org/protobuf/proto"
)

// StringifySubnetClientGroupID returns string representation of SubnetClientGroupID using MarshalText.
// Returns a string with a message on error.
func StringifySubnetClientGroupID(id *SubnetClientGroupID) string {
	text, err := id.MarshalText()
	if err != nil {
		return fmt.Sprintf("<invalid> %v", err)
	}

	return string(text)
}

// MarshalText encodes SubnetClientGroupID into text format according to FrostFS API V2 protocol:
// value in base-10 integer string format.
//
// It implements encoding.TextMarshaler.
func (x *SubnetClientGroupID) MarshalText() ([]byte, error) {
	num := x.GetValue() // NPE safe, returns zero on nil

	return []byte(strconv.FormatUint(uint64(num), 10)), nil
}

// UnmarshalText decodes the SubnetID from the text according to FrostFS API V2 protocol:
// should be base-10 integer string format with bitsize = 32.
//
// Returns strconv.ErrRange if integer overflows uint32.
//
// Must not be called on nil.
//
// Implements encoding.TextUnmarshaler.
func (x *SubnetClientGroupID) UnmarshalText(txt []byte) error {
	num, err := strconv.ParseUint(string(txt), 10, 32)
	if err != nil {
		return fmt.Errorf("invalid numeric value: %w", err)
	}

	x.SetNumber(uint32(num))

	return nil
}

// Marshal encodes the SubnetClientGroupID into a binary format of FrostFS API V2 protocol
// (Protocol Buffers with direct field order).
func (x *SubnetClientGroupID) Marshal() ([]byte, error) {
	return proto.Marshal(x)
}

// Unmarshal decodes the SubnetClientGroupID from FrostFS API V2 binary format (see Marshal). Must not be called on nil.
func (x *SubnetClientGroupID) Unmarshal(data []byte) error {
	return proto.Unmarshal(data, x)
}

// SetNumber sets SubnetClientGroupID value in uint32 format. Must not be called on nil.
// By default, number is 0.
func (x *SubnetClientGroupID) SetNumber(num uint32) {
	x.Value = num
}

Binary file not shown.

@@ -1,15 +0,0 @@
syntax = "proto3";

package neo.fs.v2.refs;

option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/internal";

// Client group identifier in the FrostFS subnet.
//
// String representation of a value is base-10 integer.
//
// JSON representation is an object containing single `value` number field.
message SubnetClientGroupID {
  // 4-byte integer identifier of the subnet client group.
  fixed32 value = 1 [json_name = "value"];
}

@@ -248,7 +248,7 @@ func (l *localClient) GetVersion() (*result.Version, error) {
	}, nil
}

func (l *localClient) InvokeContractVerify(contract util.Uint160, params []smartcontract.Parameter, signers []transaction.Signer, witnesses ...transaction.Witness) (*result.Invoke, error) {
func (l *localClient) InvokeContractVerify(util.Uint160, []smartcontract.Parameter, []transaction.Signer, ...transaction.Witness) (*result.Invoke, error) {
	// not used by `morph init` command
	panic("unexpected call")
}

cmd/frostfs-adm/internal/modules/morph/netmap_util.go | 46 (new file)
@@ -0,0 +1,46 @@
package morph

import (
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
	"github.com/spf13/viper"
)

func getDefaultNetmapContractConfigMap() map[string]any {
	m := make(map[string]any)
	m[netmap.EpochDurationConfig] = viper.GetInt64(epochDurationInitFlag)
	m[netmap.MaxObjectSizeConfig] = viper.GetInt64(maxObjectSizeInitFlag)
	m[netmap.AuditFeeConfig] = viper.GetInt64(auditFeeInitFlag)
	m[netmap.ContainerFeeConfig] = viper.GetInt64(containerFeeInitFlag)
	m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(containerAliasFeeInitFlag)
	m[netmap.BasicIncomeRateConfig] = viper.GetInt64(incomeRateInitFlag)
	m[netmap.IrCandidateFeeConfig] = viper.GetInt64(candidateFeeInitFlag)
	m[netmap.WithdrawFeeConfig] = viper.GetInt64(withdrawFeeInitFlag)
	m[netmap.HomomorphicHashingDisabledKey] = viper.GetBool(homomorphicHashDisabledInitFlag)
	m[netmap.MaintenanceModeAllowedConfig] = viper.GetBool(maintenanceModeAllowedInitFlag)
	return m
}

func parseConfigFromNetmapContract(arr []stackitem.Item) (map[string][]byte, error) {
	m := make(map[string][]byte, len(arr))
	for _, param := range arr {
		tuple, ok := param.Value().([]stackitem.Item)
		if !ok || len(tuple) != 2 {
			return nil, errors.New("invalid ListConfig response from netmap contract")
		}

		k, err := tuple[0].TryBytes()
		if err != nil {
			return nil, errors.New("invalid config key from netmap contract")
		}

		v, err := tuple[1].TryBytes()
		if err != nil {
			return nil, invalidConfigValueErr(string(k))
		}
		m[string(k)] = v
	}
	return m, nil
}
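
`getDefaultNetmapContractConfigMap` supplies the locally configured defaults, and during a contract update `getContractDeployData` overlays every value already stored on chain, so an update does not reset live settings. A standard-library sketch of that merge rule, with illustrative keys and values:

```go
package main

import "fmt"

// mergeNetmapConfig illustrates the merge rule used during contract update:
// start from the configured defaults and overwrite each known key that is
// already present on chain. Keys and values here are illustrative only.
func mergeNetmapConfig(defaults map[string]any, onChain map[string][]byte, known []string) map[string]any {
	out := make(map[string]any, len(defaults))
	for k, v := range defaults {
		out[k] = v
	}
	for k, v := range onChain {
		for _, key := range known {
			if k == key {
				out[k] = v // keep the live on-chain value
				break
			}
		}
	}
	return out
}

func main() {
	defaults := map[string]any{"EpochDuration": int64(240), "MaxObjectSize": int64(67108864)}
	onChain := map[string][]byte{"EpochDuration": {0x2c, 0x01}} // 300, little-endian
	known := []string{"EpochDuration", "MaxObjectSize"}
	fmt.Println(mergeNetmapConfig(defaults, onChain, known))
}
```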

@@ -255,7 +255,6 @@ func init() {
	initRestoreContainersCmd()
	initListContainersCmd()
	initRefillGasCmd()
	initSubnetCmd()
	initDepositoryNotaryCmd()
	initNetmapCandidatesCmd()
}

@@ -274,10 +273,6 @@ func initDepositoryNotaryCmd() {
	depositNotaryCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
}

func initSubnetCmd() {
	RootCmd.AddCommand(cmdSubnet)
}

func initRefillGasCmd() {
	RootCmd.AddCommand(refillGasCmd)
	refillGasCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")

@@ -314,7 +309,8 @@ func initUpdateContractsCmd() {
	RootCmd.AddCommand(updateContractsCmd)
	updateContractsCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
	updateContractsCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
	updateContractsCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts (default fetched from latest github release)")
	updateContractsCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts")
	_ = updateContractsCmd.MarkFlagRequired(contractsInitFlag)
}

func initDumpBalancesCmd() {

@@ -375,7 +371,8 @@ func initInitCmd() {
	RootCmd.AddCommand(initCmd)
	initCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
	initCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
	initCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts (default fetched from latest github release)")
	initCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts")
	_ = initCmd.MarkFlagRequired(contractsInitFlag)
	initCmd.Flags().Uint(epochDurationCLIFlag, 240, "Amount of side chain blocks in one FrostFS epoch")
	initCmd.Flags().Uint(maxObjectSizeCLIFlag, 67108864, "Max single object size in bytes")
	initCmd.Flags().Bool(homomorphicHashDisabledCLIFlag, false, "Disable object homomorphic hashing")

[File diff suppressed because it is too large]

@@ -52,7 +52,7 @@ func Execute() error {
	return rootCmd.Execute()
}

func entryPoint(cmd *cobra.Command, args []string) error {
func entryPoint(cmd *cobra.Command, _ []string) error {
	printVersion, _ := cmd.Flags().GetBool("version")
	if printVersion {
		cmd.Print(misc.BuildInfo("FrostFS Adm"))

@@ -12,9 +12,6 @@ node:
    - {{ .AnnouncedAddress }}
  attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
  relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
  subnet:
    exit_zero: false # toggle entrance to zero subnet (overrides corresponding attribute and occurrence in entries)
    entries: [] # list of IDs of subnets to enter in a text format of FrostFS API protocol (overrides corresponding attributes)

grpc:
  num: 1 # total number of listener endpoints

@@ -26,7 +26,6 @@ If set to '0' or not set, only the current epoch is used.
List of commands with support of extended headers:
* `container list-objects`
* `object delete/get/hash/head/lock/put/range/search`
* `storagegroup delete/get/list/put`

Example:
```shell

@@ -329,6 +329,8 @@ func CreateSession(prm CreateSessionPrm) (res CreateSessionRes, err error) {
type PutObjectPrm struct {
	commonObjectPrm

	copyNum []uint32

	hdr *object.Object

	rdr io.Reader

@@ -352,6 +354,12 @@ func (x *PutObjectPrm) SetHeaderCallback(f func(*object.Object)) {
	x.headerCallback = f
}

// SetCopiesNumberByVectors sets ordered list of minimal required object copies numbers
// per placement vector.
func (x *PutObjectPrm) SetCopiesNumberByVectors(copiesNumbers []uint32) {
	x.copyNum = copiesNumbers
}

// PutObjectRes groups the resulting values of PutObject operation.
type PutObjectRes struct {
	id oid.ID

@@ -381,6 +389,7 @@ func PutObject(prm PutObjectPrm) (*PutObjectRes, error) {
	}

	putPrm.WithXHeaders(prm.xHeaders...)
	putPrm.SetCopiesNumberByVectors(prm.copyNum)

	wrt, err := prm.cli.ObjectPutInit(context.Background(), putPrm)
	if err != nil {
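
`SetCopiesNumberByVectors` takes an ordered slice of minimal copies per placement vector, which the CLI fills from the new copies-number option of `frostfs-cli object put`. A hypothetical parsing helper for such a flag value; the flag syntax and comma separator are assumptions for illustration, not the CLI's documented format:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCopiesNumbers converts a user-supplied value such as "2,3,4" into the
// ordered per-placement-vector slice that SetCopiesNumberByVectors expects.
func parseCopiesNumbers(s string) ([]uint32, error) {
	if s == "" {
		return nil, nil // let the node apply its defaults
	}
	parts := strings.Split(s, ",")
	out := make([]uint32, 0, len(parts))
	for _, p := range parts {
		n, err := strconv.ParseUint(strings.TrimSpace(p), 10, 32)
		if err != nil {
			return nil, fmt.Errorf("invalid copies number %q: %w", p, err)
		}
		out = append(out, uint32(n))
	}
	return out, nil
}

func main() {
	nums, err := parseCopiesNumbers("2,3,4")
	fmt.Println(nums, err) // [2 3 4] <nil>
}
```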

@@ -36,11 +36,11 @@ func getSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag
	if err != nil {
		return nil, fmt.Errorf("%v: %w", errInvalidEndpoint, err)
	}
	return GetSDKClient(cmd, key, addr)
	return GetSDKClient(cmd.Context(), cmd, key, addr)
}

// GetSDKClient returns default frostfs-sdk-go client.
func GetSDKClient(cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Address) (*client.Client, error) {
func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Address) (*client.Client, error) {
	var (
		c client.Client
		prmInit client.PrmInit

@@ -62,7 +62,7 @@ func GetSDKClient(cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Addres

	c.Init(prmInit)

	if err := c.Dial(prmDial); err != nil {
	if err := c.Dial(ctx, prmDial); err != nil {
		return nil, fmt.Errorf("can't init SDK client: %w", err)
	}

@@ -82,7 +82,7 @@ func GetCurrentEpoch(ctx context.Context, cmd *cobra.Command, endpoint string) (
		return 0, fmt.Errorf("can't generate key to sign query: %w", err)
	}

	c, err := GetSDKClient(cmd, key, addr)
	c, err := GetSDKClient(ctx, cmd, key, addr)
	if err != nil {
		return 0, err
	}
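
`GetSDKClient` and `client.Dial` now take a `context.Context`, so cancelling the command's context aborts the dial instead of leaving it hanging. A minimal cobra sketch of the same plumbing, with a stand-in dial function instead of the real SDK call:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

// dial stands in for client.Dial(ctx, prmDial): it should honor cancellation
// coming from the command's context instead of blocking forever.
func dial(ctx context.Context, endpoint string) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend the connection succeeded
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, _ []string) error {
			// cmd.Context() is what the CLI now forwards into GetSDKClient.
			if err := dial(cmd.Context(), "s01.frostfs.devenv:8080"); err != nil {
				return fmt.Errorf("can't init SDK client: %w", err)
			}
			fmt.Println("connected")
			return nil
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_ = cmd.ExecuteContext(ctx)
}
```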

@@ -16,7 +16,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/spf13/cobra"
)

@@ -30,7 +29,6 @@ var (
	containerNnsName string
	containerNnsZone string
	containerNoTimestamp bool
	containerSubnet string
	force bool
)

@@ -70,15 +68,6 @@ It will be stored in sidechain when inner ring will accepts it.`,
			}
		}

		if containerSubnet != "" {
			var subnetID subnetid.ID

			err = subnetID.DecodeString(containerSubnet)
			commonCmd.ExitOnErr(cmd, "could not parse subnetID: %w", err)

			placementPolicy.RestrictSubnet(subnetID)
		}

		var cnr container.Container
		cnr.Init()

@@ -166,7 +155,6 @@ func initContainerCreateCmd() {
	flags.StringVar(&containerNnsName, "nns-name", "", "Container nns name attribute")
	flags.StringVar(&containerNnsZone, "nns-zone", "", "Container nns zone attribute")
	flags.BoolVar(&containerNoTimestamp, "disable-timestamp", false, "Disable timestamp container attribute")
	flags.StringVar(&containerSubnet, "subnet", "", "String representation of container subnetwork")
	flags.BoolVarP(&force, commonflags.ForceFlag, commonflags.ForceFlagShorthand, false,
		"Skip placement validity check")
}

@@ -8,6 +8,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/spf13/cobra"
)

@@ -16,12 +17,14 @@ import (
const (
	flagListPrintAttr = "with-attr"
	flagListContainerOwner = "owner"
	flagListName = "name"
)

// flag vars of list command.
var (
	flagVarListPrintAttr bool
	flagVarListContainerOwner string
	flagVarListName string
)

var listContainersCmd = &cobra.Command{

@@ -52,24 +55,33 @@ var listContainersCmd = &cobra.Command{
		var prmGet internalclient.GetContainerPrm
		prmGet.SetClient(cli)

		list := res.IDList()
		for i := range list {
			cmd.Println(list[i].String())
		containerIDs := res.IDList()
		for _, cnrID := range containerIDs {
			if flagVarListName == "" && !flagVarListPrintAttr {
				cmd.Println(cnrID.String())
				continue
			}

			prmGet.SetContainer(cnrID)
			res, err := internalclient.GetContainer(prmGet)
			if err != nil {
				cmd.Printf(" failed to read attributes: %v\n", err)
				continue
			}

			cnr := res.Container()
			if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
				continue
			}
			cmd.Println(cnrID.String())

			if flagVarListPrintAttr {
				prmGet.SetContainer(list[i])

				res, err := internalclient.GetContainer(prmGet)
				if err == nil {
					res.Container().IterateAttributes(func(key, val string) {
						if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
							// FIXME(@cthulhu-rider): neofs-sdk-go#314 use dedicated method to skip system attributes
							cmd.Printf(" %s: %s\n", key, val)
						}
					})
				} else {
					cmd.Printf(" failed to read attributes: %v\n", err)
				}
				cnr.IterateAttributes(func(key, val string) {
					if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
						// FIXME(@cthulhu-rider): neofs-sdk-go#314 use dedicated method to skip system attributes
						cmd.Printf(" %s: %s\n", key, val)
					}
				})
			}
		}
	},

@@ -80,6 +92,9 @@ func initContainerListContainersCmd() {

	flags := listContainersCmd.Flags()

	flags.StringVar(&flagVarListName, flagListName, "",
		"List containers by the attribute name",
	)
	flags.StringVar(&flagVarListContainerOwner, flagListContainerOwner, "",
		"Owner of containers (omit to use owner from private key)",
	)

@@ -8,6 +8,8 @@ import (
	"github.com/spf13/cobra"
)

const ignoreErrorsFlag = "no-errors"

var evacuateShardCmd = &cobra.Command{
	Use: "evacuate",
	Short: "Evacuate objects from shard",

@@ -20,7 +22,7 @@ func evacuateShard(cmd *cobra.Command, _ []string) {

	req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
	req.Body.Shard_ID = getShardIDList(cmd)
	req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(dumpIgnoreErrorsFlag)
	req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)

	signRequest(cmd, pk, req)

@@ -47,7 +49,7 @@ func initControlEvacuateShardCmd() {
	flags := evacuateShardCmd.Flags()
	flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
	flags.Bool(shardAllFlag, false, "Process all shards")
	flags.Bool(dumpIgnoreErrorsFlag, false, "Skip invalid/unreadable objects")
	flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")

	evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
17 cmd/frostfs-cli/modules/control/ir.go Normal file

@@ -0,0 +1,17 @@
package control

import "github.com/spf13/cobra"

var irCmd = &cobra.Command{
    Use:   "ir",
    Short: "Operations with inner ring nodes",
    Long:  "Operations with inner ring nodes",
}

func initControlIRCmd() {
    irCmd.AddCommand(tickEpochCmd)
    irCmd.AddCommand(removeNodeCmd)

    initControlIRTickEpochCmd()
    initControlIRRemoveNodeCmd()
}
58 cmd/frostfs-cli/modules/control/ir_remove_node.go Normal file

@@ -0,0 +1,58 @@
package control

import (
    "encoding/hex"
    "errors"

    rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
    ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
    ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
    "github.com/spf13/cobra"
)

var removeNodeCmd = &cobra.Command{
    Use:   "remove-node",
    Short: "Forces a node removal from netmap",
    Long:  "Forces a node removal from netmap via a notary request. It should be executed on other IR nodes as well.",
    Run:   removeNode,
}

func initControlIRRemoveNodeCmd() {
    initControlFlags(removeNodeCmd)

    flags := removeNodeCmd.Flags()
    flags.String("node", "", "Node public key as a hex string")
    _ = removeNodeCmd.MarkFlagRequired("node")
}

func removeNode(cmd *cobra.Command, _ []string) {
    pk := key.Get(cmd)
    c := getClient(cmd, pk)

    nodeKeyStr, _ := cmd.Flags().GetString("node")
    if len(nodeKeyStr) == 0 {
        commonCmd.ExitOnErr(cmd, "parsing node public key: ", errors.New("key cannot be empty"))
    }
    nodeKey, err := hex.DecodeString(nodeKeyStr)
    commonCmd.ExitOnErr(cmd, "can't decode node public key: %w", err)

    req := new(ircontrol.RemoveNodeRequest)
    req.SetBody(&ircontrol.RemoveNodeRequest_Body{
        Key: nodeKey,
    })

    commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req))

    var resp *ircontrol.RemoveNodeResponse
    err = c.ExecRaw(func(client *rawclient.Client) error {
        resp, err = ircontrol.RemoveNode(client, req)
        return err
    })
    commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

    verifyResponse(cmd, resp.GetSignature(), resp.GetBody())

    cmd.Println("Node removed")
}
43 cmd/frostfs-cli/modules/control/ir_tick_epoch.go Normal file

@@ -0,0 +1,43 @@
package control

import (
    rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
    ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
    ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
    "github.com/spf13/cobra"
)

var tickEpochCmd = &cobra.Command{
    Use:   "tick-epoch",
    Short: "Forces a new epoch",
    Long:  "Forces a new epoch via a notary request. It should be executed on other IR nodes as well.",
    Run:   tickEpoch,
}

func initControlIRTickEpochCmd() {
    initControlFlags(tickEpochCmd)
}

func tickEpoch(cmd *cobra.Command, _ []string) {
    pk := key.Get(cmd)
    c := getClient(cmd, pk)

    req := new(ircontrol.TickEpochRequest)
    req.SetBody(new(ircontrol.TickEpochRequest_Body))

    err := ircontrolsrv.SignMessage(pk, req)
    commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)

    var resp *ircontrol.TickEpochResponse
    err = c.ExecRaw(func(client *rawclient.Client) error {
        resp, err = ircontrol.TickEpoch(client, req)
        return err
    })
    commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

    verifyResponse(cmd, resp.GetSignature(), resp.GetBody())

    cmd.Println("Epoch tick requested")
}
@@ -33,6 +33,7 @@ func init() {
        dropObjectsCmd,
        shardsCmd,
        synchronizeTreeCmd,
        irCmd,
    )

    initControlHealthCheckCmd()

@@ -40,4 +41,5 @@ func init() {
    initControlDropObjectsCmd()
    initControlShardsCmd()
    initControlSynchronizeTreeCmd()
    initControlIRCmd()
}
@@ -13,16 +13,12 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
    shardsCmd.AddCommand(listShardsCmd)
    shardsCmd.AddCommand(setShardModeCmd)
    shardsCmd.AddCommand(dumpShardCmd)
    shardsCmd.AddCommand(restoreShardCmd)
    shardsCmd.AddCommand(evacuateShardCmd)
    shardsCmd.AddCommand(flushCacheCmd)
    shardsCmd.AddCommand(doctorCmd)

    initControlShardsListCmd()
    initControlSetShardModeCmd()
    initControlDumpShardCmd()
    initControlRestoreShardCmd()
    initControlEvacuateShardCmd()
    initControlFlushCacheCmd()
    initControlDoctorCmd()
|
@ -1,66 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
dumpFilepathFlag = "path"
|
||||
dumpIgnoreErrorsFlag = "no-errors"
|
||||
)
|
||||
|
||||
var dumpShardCmd = &cobra.Command{
|
||||
Use: "dump",
|
||||
Short: "Dump objects from shard",
|
||||
Long: "Dump objects from shard to a file",
|
||||
Run: dumpShard,
|
||||
}
|
||||
|
||||
func dumpShard(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
body := new(control.DumpShardRequest_Body)
|
||||
body.SetShardID(getShardID(cmd))
|
||||
|
||||
p, _ := cmd.Flags().GetString(dumpFilepathFlag)
|
||||
body.SetFilepath(p)
|
||||
|
||||
ignore, _ := cmd.Flags().GetBool(dumpIgnoreErrorsFlag)
|
||||
body.SetIgnoreErrors(ignore)
|
||||
|
||||
req := new(control.DumpShardRequest)
|
||||
req.SetBody(body)
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.DumpShardResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.DumpShard(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
cmd.Println("Shard has been dumped successfully.")
|
||||
}
|
||||
|
||||
func initControlDumpShardCmd() {
|
||||
initControlFlags(dumpShardCmd)
|
||||
|
||||
flags := dumpShardCmd.Flags()
|
||||
flags.String(shardIDFlag, "", "Shard ID in base58 encoding")
|
||||
flags.String(dumpFilepathFlag, "", "File to write objects to")
|
||||
flags.Bool(dumpIgnoreErrorsFlag, false, "Skip invalid/unreadable objects")
|
||||
|
||||
_ = dumpShardCmd.MarkFlagRequired(shardIDFlag)
|
||||
_ = dumpShardCmd.MarkFlagRequired(dumpFilepathFlag)
|
||||
_ = dumpShardCmd.MarkFlagRequired(controlRPC)
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
restoreFilepathFlag = "path"
|
||||
restoreIgnoreErrorsFlag = "no-errors"
|
||||
)
|
||||
|
||||
var restoreShardCmd = &cobra.Command{
|
||||
Use: "restore",
|
||||
Short: "Restore objects from shard",
|
||||
Long: "Restore objects from shard to a file",
|
||||
Run: restoreShard,
|
||||
}
|
||||
|
||||
func restoreShard(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
body := new(control.RestoreShardRequest_Body)
|
||||
body.SetShardID(getShardID(cmd))
|
||||
|
||||
p, _ := cmd.Flags().GetString(restoreFilepathFlag)
|
||||
body.SetFilepath(p)
|
||||
|
||||
ignore, _ := cmd.Flags().GetBool(restoreIgnoreErrorsFlag)
|
||||
body.SetIgnoreErrors(ignore)
|
||||
|
||||
req := new(control.RestoreShardRequest)
|
||||
req.SetBody(body)
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.RestoreShardResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.RestoreShard(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
cmd.Println("Shard has been restored successfully.")
|
||||
}
|
||||
|
||||
func initControlRestoreShardCmd() {
|
||||
initControlFlags(restoreShardCmd)
|
||||
|
||||
flags := restoreShardCmd.Flags()
|
||||
flags.String(shardIDFlag, "", "Shard ID in base58 encoding")
|
||||
flags.String(restoreFilepathFlag, "", "File to read objects from")
|
||||
flags.Bool(restoreIgnoreErrorsFlag, false, "Skip invalid/unreadable objects")
|
||||
|
||||
_ = restoreShardCmd.MarkFlagRequired(shardIDFlag)
|
||||
_ = restoreShardCmd.MarkFlagRequired(restoreFilepathFlag)
|
||||
_ = restoreShardCmd.MarkFlagRequired(controlRPC)
|
||||
}
|
|
@@ -137,13 +137,6 @@ func setShardMode(cmd *cobra.Command, _ []string) {
    cmd.Println("Shard mode update request successfully sent.")
}

func getShardID(cmd *cobra.Command) []byte {
    sid, _ := cmd.Flags().GetString(shardIDFlag)
    raw, err := base58.Decode(sid)
    commonCmd.ExitOnErr(cmd, "incorrect shard ID encoding: %w", err)
    return raw
}

func getShardIDList(cmd *cobra.Command) [][]byte {
    all, _ := cmd.Flags().GetBool(shardAllFlag)
    if all {
@@ -41,8 +41,6 @@ var netInfoCmd = &cobra.Command{
        cmd.Printf(format, "Audit fee", netInfo.AuditFee())
        cmd.Printf(format, "Storage price", netInfo.StoragePrice())
        cmd.Printf(format, "Container fee", netInfo.ContainerFee())
        cmd.Printf(format, "EigenTrust alpha", netInfo.EigenTrustAlpha())
        cmd.Printf(format, "Number of EigenTrust iterations", netInfo.NumberOfEigenTrustIterations())
        cmd.Printf(format, "Epoch duration", netInfo.EpochDuration())
        cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee())
        cmd.Printf(format, "Maximum object size", netInfo.MaxObjectSize())
@@ -25,6 +25,7 @@ import (
const (
    noProgressFlag   = "no-progress"
    notificationFlag = "notify"
    copiesNumberFlag = "copies-number"
)

var putExpiredOn uint64

@@ -56,6 +57,8 @@ func initObjectPutCmd() {

    flags.String(notificationFlag, "", "Object notification in the form of *epoch*:*topic*; '-' topic means using default")
    flags.Bool(binaryFlag, false, "Deserialize object structure from given file.")

    flags.String(copiesNumberFlag, "", "Number of copies of the object to store within the RPC call")
}

func putObject(cmd *cobra.Command, _ []string) {

@@ -116,6 +119,18 @@ func putObject(cmd *cobra.Command, _ []string) {
        }
    }

    copyNum, err := cmd.Flags().GetString(copiesNumberFlag)
    commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err)
    if len(copyNum) > 0 {
        var cn []uint32
        for _, num := range strings.Split(copyNum, ",") {
            val, err := strconv.ParseUint(num, 10, 32)
            commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err)
            cn = append(cn, uint32(val))
        }
        prm.SetCopiesNumberByVectors(cn)
    }

    res, err := internalclient.PutObject(prm)
    if p != nil {
        p.Finish()
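The new `copies-number` flag accepts a comma-separated list of per-vector values (for example `--copies-number 2,3,1`). A minimal sketch of the same parsing step in isolation; the helper name and the sample input are illustrative, not part of the change:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseCopiesNumbers mirrors the flag handling above: it splits a
    // comma-separated string and converts each element to uint32.
    func parseCopiesNumbers(s string) ([]uint32, error) {
        if s == "" {
            return nil, nil
        }
        var cn []uint32
        for _, num := range strings.Split(s, ",") {
            val, err := strconv.ParseUint(num, 10, 32)
            if err != nil {
                return nil, fmt.Errorf("can't parse copies number %q: %w", num, err)
            }
            cn = append(cn, uint32(val))
        }
        return cn, nil
    }

    func main() {
        cn, err := parseCopiesNumbers("2,3,1")
        fmt.Println(cn, err) // [2 3 1] <nil>
    }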
@@ -14,7 +14,6 @@ import (
    netmapCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/netmap"
    objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
    sessionCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/session"
    sgCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/storagegroup"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/tree"
    utilCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"

@@ -84,7 +83,6 @@ func init() {
    rootCmd.AddCommand(utilCli.Cmd)
    rootCmd.AddCommand(netmapCli.Cmd)
    rootCmd.AddCommand(objectCli.Cmd)
    rootCmd.AddCommand(sgCli.Cmd)
    rootCmd.AddCommand(containerCli.Cmd)
    rootCmd.AddCommand(tree.Cmd)
    rootCmd.AddCommand(gendoc.Command(rootCmd))
@@ -54,7 +54,7 @@ func createSession(cmd *cobra.Command, _ []string) {
    addrStr, _ := cmd.Flags().GetString(commonflags.RPC)
    commonCmd.ExitOnErr(cmd, "can't parse endpoint: %w", netAddr.FromString(addrStr))

    c, err := internalclient.GetSDKClient(cmd, privKey, netAddr)
    c, err := internalclient.GetSDKClient(cmd.Context(), cmd, privKey, netAddr)
    commonCmd.ExitOnErr(cmd, "can't create client: %w", err)

    lifetime := uint64(defaultLifetime)
@ -1,53 +0,0 @@
|
|||
package storagegroup
|
||||
|
||||
import (
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var sgDelCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete storage group from FrostFS",
|
||||
Long: "Delete storage group from FrostFS",
|
||||
Run: delSG,
|
||||
}
|
||||
|
||||
func initSGDeleteCmd() {
|
||||
commonflags.Init(sgDelCmd)
|
||||
|
||||
flags := sgDelCmd.Flags()
|
||||
|
||||
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
_ = sgDelCmd.MarkFlagRequired(commonflags.CIDFlag)
|
||||
|
||||
flags.StringVarP(&sgID, sgIDFlag, "", "", "Storage group identifier")
|
||||
_ = sgDelCmd.MarkFlagRequired(sgIDFlag)
|
||||
}
|
||||
|
||||
func delSG(cmd *cobra.Command, _ []string) {
|
||||
pk := key.GetOrGenerate(cmd)
|
||||
|
||||
var cnr cid.ID
|
||||
var obj oid.ID
|
||||
|
||||
addr := readObjectAddress(cmd, &cnr, &obj)
|
||||
|
||||
var prm internalclient.DeleteObjectPrm
|
||||
objectCli.OpenSession(cmd, &prm, pk, cnr, &obj)
|
||||
objectCli.Prepare(cmd, &prm)
|
||||
prm.SetAddress(addr)
|
||||
|
||||
res, err := internalclient.DeleteObject(prm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
tombstone := res.Tombstone()
|
||||
|
||||
cmd.Println("Storage group removed successfully.")
|
||||
cmd.Printf(" Tombstone: %s\n", tombstone)
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
package storagegroup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
storagegroupSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var sgID string
|
||||
|
||||
var sgGetCmd = &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "Get storage group from FrostFS",
|
||||
Long: "Get storage group from FrostFS",
|
||||
Run: getSG,
|
||||
}
|
||||
|
||||
func initSGGetCmd() {
|
||||
commonflags.Init(sgGetCmd)
|
||||
|
||||
flags := sgGetCmd.Flags()
|
||||
|
||||
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
_ = sgGetCmd.MarkFlagRequired(commonflags.CIDFlag)
|
||||
|
||||
flags.StringVarP(&sgID, sgIDFlag, "", "", "Storage group identifier")
|
||||
_ = sgGetCmd.MarkFlagRequired(sgIDFlag)
|
||||
|
||||
flags.Bool(sgRawFlag, false, "Set raw request option")
|
||||
}
|
||||
|
||||
func getSG(cmd *cobra.Command, _ []string) {
|
||||
var cnr cid.ID
|
||||
var obj oid.ID
|
||||
|
||||
addr := readObjectAddress(cmd, &cnr, &obj)
|
||||
pk := key.GetOrGenerate(cmd)
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||
|
||||
var prm internalclient.GetObjectPrm
|
||||
objectCli.Prepare(cmd, &prm)
|
||||
prm.SetClient(cli)
|
||||
|
||||
raw, _ := cmd.Flags().GetBool(sgRawFlag)
|
||||
prm.SetRawFlag(raw)
|
||||
prm.SetAddress(addr)
|
||||
prm.SetPayloadWriter(buf)
|
||||
|
||||
res, err := internalclient.GetObject(prm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
rawObj := res.Header()
|
||||
rawObj.SetPayload(buf.Bytes())
|
||||
|
||||
var sg storagegroupSDK.StorageGroup
|
||||
|
||||
err = storagegroupSDK.ReadFromObject(&sg, *rawObj)
|
||||
commonCmd.ExitOnErr(cmd, "could not read storage group from the obj: %w", err)
|
||||
|
||||
cmd.Printf("The last active epoch: %d\n", sg.ExpirationEpoch())
|
||||
cmd.Printf("Group size: %d\n", sg.ValidationDataSize())
|
||||
common.PrintChecksum(cmd, "Group hash", sg.ValidationDataHash)
|
||||
|
||||
if members := sg.Members(); len(members) > 0 {
|
||||
cmd.Println("Members:")
|
||||
|
||||
for i := range members {
|
||||
cmd.Printf("\t%s\n", members[i].String())
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,52 +0,0 @@
|
|||
package storagegroup
|
||||
|
||||
import (
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/storagegroup"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var sgListCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List storage groups in FrostFS container",
|
||||
Long: "List storage groups in FrostFS container",
|
||||
Run: listSG,
|
||||
}
|
||||
|
||||
func initSGListCmd() {
|
||||
commonflags.Init(sgListCmd)
|
||||
|
||||
sgListCmd.Flags().String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
_ = sgListCmd.MarkFlagRequired(commonflags.CIDFlag)
|
||||
}
|
||||
|
||||
func listSG(cmd *cobra.Command, _ []string) {
|
||||
var cnr cid.ID
|
||||
readCID(cmd, &cnr)
|
||||
|
||||
pk := key.GetOrGenerate(cmd)
|
||||
|
||||
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||
|
||||
var prm internalclient.SearchObjectsPrm
|
||||
objectCli.Prepare(cmd, &prm)
|
||||
prm.SetClient(cli)
|
||||
prm.SetContainerID(cnr)
|
||||
prm.SetFilters(storagegroup.SearchQuery())
|
||||
|
||||
res, err := internalclient.SearchObjects(prm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
ids := res.IDList()
|
||||
|
||||
cmd.Printf("Found %d storage groups.\n", len(ids))
|
||||
|
||||
for i := range ids {
|
||||
cmd.Println(ids[i].String())
|
||||
}
|
||||
}
|
|
@ -1,145 +0,0 @@
|
|||
package storagegroup
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/storagegroup"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
storagegroupSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const sgMembersFlag = "members"
|
||||
|
||||
var sgMembers []string
|
||||
|
||||
var sgPutCmd = &cobra.Command{
|
||||
Use: "put",
|
||||
Short: "Put storage group to FrostFS",
|
||||
Long: "Put storage group to FrostFS",
|
||||
Run: putSG,
|
||||
}
|
||||
|
||||
func initSGPutCmd() {
|
||||
commonflags.Init(sgPutCmd)
|
||||
|
||||
flags := sgPutCmd.Flags()
|
||||
|
||||
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
_ = sgPutCmd.MarkFlagRequired(commonflags.CIDFlag)
|
||||
|
||||
flags.StringSliceVarP(&sgMembers, sgMembersFlag, "m", nil, "ID list of storage group members")
|
||||
_ = sgPutCmd.MarkFlagRequired(sgMembersFlag)
|
||||
|
||||
flags.Uint64(commonflags.Lifetime, 0, "Storage group lifetime in epochs")
|
||||
_ = sgPutCmd.MarkFlagRequired(commonflags.Lifetime)
|
||||
}
|
||||
|
||||
func putSG(cmd *cobra.Command, _ []string) {
|
||||
pk := key.GetOrGenerate(cmd)
|
||||
|
||||
var ownerID user.ID
|
||||
user.IDFromKey(&ownerID, pk.PublicKey)
|
||||
|
||||
var cnr cid.ID
|
||||
readCID(cmd, &cnr)
|
||||
|
||||
members := make([]oid.ID, len(sgMembers))
|
||||
uniqueFilter := make(map[oid.ID]struct{}, len(sgMembers))
|
||||
|
||||
for i := range sgMembers {
|
||||
err := members[i].DecodeString(sgMembers[i])
|
||||
commonCmd.ExitOnErr(cmd, "could not parse object ID: %w", err)
|
||||
|
||||
if _, alreadyExists := uniqueFilter[members[i]]; alreadyExists {
|
||||
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("%s member in not unique", members[i]))
|
||||
}
|
||||
|
||||
uniqueFilter[members[i]] = struct{}{}
|
||||
}
|
||||
|
||||
var (
|
||||
headPrm internalclient.HeadObjectPrm
|
||||
putPrm internalclient.PutObjectPrm
|
||||
getCnrPrm internalclient.GetContainerPrm
|
||||
)
|
||||
|
||||
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||
getCnrPrm.SetClient(cli)
|
||||
getCnrPrm.SetContainer(cnr)
|
||||
|
||||
resGetCnr, err := internalclient.GetContainer(getCnrPrm)
|
||||
commonCmd.ExitOnErr(cmd, "get container RPC call: %w", err)
|
||||
|
||||
objectCli.OpenSessionViaClient(cmd, &putPrm, cli, pk, cnr, nil)
|
||||
objectCli.Prepare(cmd, &headPrm, &putPrm)
|
||||
|
||||
headPrm.SetRawFlag(true)
|
||||
headPrm.SetClient(cli)
|
||||
|
||||
sg, err := storagegroup.CollectMembers(sgHeadReceiver{
|
||||
cmd: cmd,
|
||||
key: pk,
|
||||
ownerID: &ownerID,
|
||||
prm: headPrm,
|
||||
}, cnr, members, !container.IsHomomorphicHashingDisabled(resGetCnr.Container()))
|
||||
commonCmd.ExitOnErr(cmd, "could not collect storage group members: %w", err)
|
||||
|
||||
var netInfoPrm internalclient.NetworkInfoPrm
|
||||
netInfoPrm.SetClient(cli)
|
||||
|
||||
ni, err := internalclient.NetworkInfo(netInfoPrm)
|
||||
commonCmd.ExitOnErr(cmd, "can't fetch network info: %w", err)
|
||||
|
||||
lifetime, _ := cmd.Flags().GetUint64(commonflags.Lifetime)
|
||||
sg.SetExpirationEpoch(ni.NetworkInfo().CurrentEpoch() + lifetime)
|
||||
|
||||
obj := object.New()
|
||||
obj.SetContainerID(cnr)
|
||||
obj.SetOwnerID(&ownerID)
|
||||
|
||||
storagegroupSDK.WriteToObject(*sg, obj)
|
||||
|
||||
putPrm.SetHeader(obj)
|
||||
|
||||
res, err := internalclient.PutObject(putPrm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
cmd.Println("Storage group successfully stored")
|
||||
cmd.Printf(" ID: %s\n CID: %s\n", res.ID(), cnr)
|
||||
}
|
||||
|
||||
type sgHeadReceiver struct {
|
||||
cmd *cobra.Command
|
||||
key *ecdsa.PrivateKey
|
||||
ownerID *user.ID
|
||||
prm internalclient.HeadObjectPrm
|
||||
}
|
||||
|
||||
func (c sgHeadReceiver) Head(addr oid.Address) (any, error) {
|
||||
c.prm.SetAddress(addr)
|
||||
|
||||
res, err := internalclient.HeadObject(c.prm)
|
||||
|
||||
var errSplitInfo *object.SplitInfoError
|
||||
|
||||
switch {
|
||||
default:
|
||||
return nil, err
|
||||
case err == nil:
|
||||
return res.Header(), nil
|
||||
case errors.As(err, &errSplitInfo):
|
||||
return errSplitInfo.SplitInfo(), nil
|
||||
}
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
package storagegroup
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Cmd represents the storagegroup command.
|
||||
var Cmd = &cobra.Command{
|
||||
Use: "storagegroup",
|
||||
Short: "Operations with Storage Groups",
|
||||
Long: `Operations with Storage Groups`,
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
// bind exactly that cmd's flags to
|
||||
// the viper before execution
|
||||
commonflags.Bind(cmd)
|
||||
commonflags.BindAPI(cmd)
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
sgIDFlag = "id"
|
||||
sgRawFlag = "raw"
|
||||
)
|
||||
|
||||
func init() {
|
||||
storageGroupChildCommands := []*cobra.Command{
|
||||
sgPutCmd,
|
||||
sgGetCmd,
|
||||
sgListCmd,
|
||||
sgDelCmd,
|
||||
}
|
||||
|
||||
Cmd.AddCommand(storageGroupChildCommands...)
|
||||
|
||||
for _, sgCommand := range storageGroupChildCommands {
|
||||
objectCli.InitBearer(sgCommand)
|
||||
commonflags.InitAPI(sgCommand)
|
||||
}
|
||||
|
||||
initSGPutCmd()
|
||||
initSGGetCmd()
|
||||
initSGListCmd()
|
||||
initSGDeleteCmd()
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
package storagegroup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
|
||||
readCID(cmd, cnr)
|
||||
readSGID(cmd, obj)
|
||||
|
||||
var addr oid.Address
|
||||
addr.SetContainer(*cnr)
|
||||
addr.SetObject(*obj)
|
||||
return addr
|
||||
}
|
||||
|
||||
func readCID(cmd *cobra.Command, id *cid.ID) {
|
||||
f := cmd.Flag(commonflags.CIDFlag)
|
||||
if f == nil {
|
||||
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("missing container flag (%s)", commonflags.CIDFlag))
|
||||
return
|
||||
}
|
||||
|
||||
err := id.DecodeString(f.Value.String())
|
||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||
}
|
||||
|
||||
func readSGID(cmd *cobra.Command, id *oid.ID) {
|
||||
const flag = "id"
|
||||
|
||||
f := cmd.Flag(flag)
|
||||
if f == nil {
|
||||
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("missing storage group flag (%s)", flag))
|
||||
return
|
||||
}
|
||||
|
||||
err := id.DecodeString(f.Value.String())
|
||||
commonCmd.ExitOnErr(cmd, "decode storage group ID string: %w", err)
|
||||
}
|
80 cmd/frostfs-ir/config.go Normal file

@@ -0,0 +1,80 @@
package main

import (
    "os"
    "os/signal"
    "syscall"

    configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "github.com/spf13/viper"
    "go.uber.org/zap"
)

func newConfig() (*viper.Viper, error) {
    var err error
    var dv = viper.New()

    defaultConfiguration(dv)

    _, err = configViper.CreateViper(configViper.WithConfigFile(*configFile),
        configViper.WithConfigDir(*configDir), configViper.WithEnvPrefix(EnvPrefix),
        configViper.WithViper(dv))
    if err != nil {
        return nil, err
    }

    return dv, err
}

func reloadConfig() error {
    err := configViper.ReloadViper(configViper.WithConfigFile(*configFile),
        configViper.WithConfigDir(*configDir), configViper.WithEnvPrefix(EnvPrefix),
        configViper.WithViper(cfg))
    if err != nil {
        return err
    }
    err = logPrm.SetLevelString(cfg.GetString("logger.level"))
    if err != nil {
        return err
    }
    return logPrm.Reload()
}

func watchForSignal(cancel func()) {
    ch := make(chan os.Signal, 1)
    signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

    for {
        select {
        case err := <-intErr:
            log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
            cancel()
            shutdown()
            return
        case sig := <-ch:
            switch sig {
            case syscall.SIGHUP:
                log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
                err := reloadConfig()
                if err != nil {
                    log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
                }
                pprofCmp.reload()
                metricsCmp.reload()
                log.Info(logs.FrostFSIRReloadExtraWallets)
                err = innerRing.SetExtraWallets(cfg)
                if err != nil {
                    log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
                }
                log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
            case syscall.SIGTERM, syscall.SIGINT:
                log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
                cancel()
                shutdown()
                log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
                return
            }
        }
    }
}
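The `watchForSignal` loop above multiplexes internal errors and OS signals: SIGHUP re-reads the configuration and reloads the pprof and metrics components, while SIGTERM/SIGINT shut everything down. A stripped-down, stdlib-only sketch of the same pattern, where the reload and shutdown callbacks stand in for the project-specific calls:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    // watch blocks until a termination signal arrives, calling reload on every SIGHUP.
    func watch(reload func() error, shutdown func()) {
        ch := make(chan os.Signal, 1)
        signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

        for sig := range ch {
            switch sig {
            case syscall.SIGHUP:
                if err := reload(); err != nil {
                    fmt.Println("reload failed:", err)
                }
            case syscall.SIGTERM, syscall.SIGINT:
                shutdown()
                return
            }
        }
    }

    func main() {
        watch(
            func() error { fmt.Println("rereading configuration"); return nil },
            func() { fmt.Println("stopping") },
        )
    }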
|
@ -1,46 +1,11 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func newConfig(path, directory string) (*viper.Viper, error) {
|
||||
const envPrefix = "FROSTFS_IR"
|
||||
|
||||
var (
|
||||
err error
|
||||
v = viper.New()
|
||||
)
|
||||
|
||||
v.SetEnvPrefix(envPrefix)
|
||||
v.AutomaticEnv()
|
||||
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
|
||||
defaultConfiguration(v)
|
||||
|
||||
if path != "" {
|
||||
v.SetConfigFile(path)
|
||||
if strings.HasSuffix(path, ".json") {
|
||||
v.SetConfigType("json")
|
||||
} else {
|
||||
v.SetConfigType("yml")
|
||||
}
|
||||
if err = v.ReadInConfig(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
}
|
||||
|
||||
if directory != "" {
|
||||
err = config.ReadConfigDir(v, directory)
|
||||
}
|
||||
|
||||
return v, err
|
||||
}
|
||||
|
||||
func defaultConfiguration(cfg *viper.Viper) {
|
||||
cfg.SetDefault("logger.level", "info")
|
||||
|
||||
|
@ -68,10 +33,6 @@ func defaultConfiguration(cfg *viper.Viper) {
|
|||
|
||||
setEmitDefaults(cfg)
|
||||
|
||||
setAuditDefaults(cfg)
|
||||
|
||||
setSettlementDefaults(cfg)
|
||||
|
||||
cfg.SetDefault("indexer.cache_timeout", 15*time.Second)
|
||||
|
||||
cfg.SetDefault("locode.db.path", "")
|
||||
|
@ -95,23 +56,6 @@ func setFeeDefaults(cfg *viper.Viper) {
|
|||
cfg.SetDefault("fee.named_container_register", 25_0000_0000) // 25.0 Fixed8
|
||||
}
|
||||
|
||||
func setSettlementDefaults(cfg *viper.Viper) {
|
||||
cfg.SetDefault("settlement.basic_income_rate", 0)
|
||||
cfg.SetDefault("settlement.audit_fee", 0)
|
||||
}
|
||||
|
||||
func setAuditDefaults(cfg *viper.Viper) {
|
||||
cfg.SetDefault("audit.task.exec_pool_size", 10)
|
||||
cfg.SetDefault("audit.task.queue_capacity", 100)
|
||||
cfg.SetDefault("audit.timeout.get", "5s")
|
||||
cfg.SetDefault("audit.timeout.head", "5s")
|
||||
cfg.SetDefault("audit.timeout.rangehash", "5s")
|
||||
cfg.SetDefault("audit.timeout.search", "10s")
|
||||
cfg.SetDefault("audit.pdp.max_sleep_interval", "5s")
|
||||
cfg.SetDefault("audit.pdp.pairs_pool_size", "10")
|
||||
cfg.SetDefault("audit.por.pool_size", "10")
|
||||
}
|
||||
|
||||
func setEmitDefaults(cfg *viper.Viper) {
|
||||
cfg.SetDefault("emit.storage.amount", 0)
|
||||
cfg.SetDefault("emit.mint.cache_size", 1000)
|
||||
|
@ -142,8 +86,6 @@ func setWorkersDefaults(cfg *viper.Viper) {
|
|||
cfg.SetDefault("workers.frostfs", "10")
|
||||
cfg.SetDefault("workers.container", "10")
|
||||
cfg.SetDefault("workers.alphabet", "10")
|
||||
cfg.SetDefault("workers.reputation", "10")
|
||||
cfg.SetDefault("workers.subnet", "10")
|
||||
}
|
||||
|
||||
func setTimersDefaults(cfg *viper.Viper) {
|
||||
|
@ -161,11 +103,8 @@ func setContractsDefaults(cfg *viper.Viper) {
|
|||
cfg.SetDefault("contracts.frostfs", "")
|
||||
cfg.SetDefault("contracts.balance", "")
|
||||
cfg.SetDefault("contracts.container", "")
|
||||
cfg.SetDefault("contracts.audit", "")
|
||||
cfg.SetDefault("contracts.proxy", "")
|
||||
cfg.SetDefault("contracts.processing", "")
|
||||
cfg.SetDefault("contracts.reputation", "")
|
||||
cfg.SetDefault("contracts.subnet", "")
|
||||
cfg.SetDefault("contracts.proxy", "")
|
||||
}
|
||||
|
||||
|
|
87 cmd/frostfs-ir/httpcomponent.go Normal file

@@ -0,0 +1,87 @@
package main

import (
    "fmt"
    "net/http"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
    "go.uber.org/zap"
)

type httpComponent struct {
    srv         *httputil.Server
    address     string
    name        string
    handler     http.Handler
    shutdownDur time.Duration
    enabled     bool
}

const (
    enabledKeyPostfix         = ".enabled"
    addressKeyPostfix         = ".address"
    shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)

func (c *httpComponent) init() {
    log.Info(fmt.Sprintf("init %s", c.name))
    c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
    c.address = cfg.GetString(c.name + addressKeyPostfix)
    c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)

    if c.enabled {
        c.srv = httputil.New(
            httputil.HTTPSrvPrm{
                Address: c.address,
                Handler: c.handler,
            },
            httputil.WithShutdownTimeout(c.shutdownDur),
        )
    } else {
        log.Info(fmt.Sprintf("%s is disabled, skip", c.name))
        c.srv = nil
    }
}

func (c *httpComponent) start() {
    if c.srv != nil {
        log.Info(fmt.Sprintf("start %s", c.name))
        wg.Add(1)
        go func() {
            defer wg.Done()
            exitErr(c.srv.Serve())
        }()
    }
}

func (c *httpComponent) shutdown() error {
    if c.srv != nil {
        log.Info(fmt.Sprintf("shutdown %s", c.name))
        return c.srv.Shutdown()
    }
    return nil
}

func (c *httpComponent) needReload() bool {
    enabled := cfg.GetBool(c.name + enabledKeyPostfix)
    address := cfg.GetString(c.name + addressKeyPostfix)
    dur := cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
    return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}

func (c *httpComponent) reload() {
    log.Info(fmt.Sprintf("reload %s", c.name))
    if c.needReload() {
        log.Info(fmt.Sprintf("%s config updated", c.name))
        if err := c.shutdown(); err != nil {
            log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
                zap.String("error", err.Error()),
            )
        } else {
            c.init()
            c.start()
        }
    }
}
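The same component type backs both the metrics and pprof endpoints (see `metrics.go` and `pprof.go` below): a component is just a name plus an `http.Handler`, and the enabled flag, address and shutdown timeout are read from config under that name. A hedged sketch of how another endpoint could be wired the same way; the `healthz` name and handler are hypothetical, not part of this change, and the function is assumed to live in the same `package main` next to httpcomponent.go:

    // newHealthComponent is a hypothetical example of reusing httpComponent:
    // it would read healthz.enabled, healthz.address and healthz.shutdown_timeout
    // from the same viper config and serve the handler below.
    func newHealthComponent() *httpComponent {
        return &httpComponent{
            name: "healthz",
            handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
                w.WriteHeader(http.StatusOK)
                _, _ = w.Write([]byte("ok"))
            }),
        }
    }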
|
@ -4,16 +4,13 @@ import (
|
|||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
|
||||
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -24,6 +21,21 @@ const (
|
|||
|
||||
// SuccessReturnCode returns when application closed without panic.
|
||||
SuccessReturnCode = 0
|
||||
|
||||
EnvPrefix = "FROSTFS_IR"
|
||||
)
|
||||
|
||||
var (
|
||||
wg = new(sync.WaitGroup)
|
||||
intErr = make(chan error) // internal inner ring errors
|
||||
logPrm = new(logger.Prm)
|
||||
innerRing *innerring.Server
|
||||
pprofCmp *pprofComponent
|
||||
metricsCmp *httpComponent
|
||||
log *logger.Logger
|
||||
cfg *viper.Viper
|
||||
configFile *string
|
||||
configDir *string
|
||||
)
|
||||
|
||||
func exitErr(err error) {
|
||||
|
@ -34,8 +46,8 @@ func exitErr(err error) {
|
|||
}
|
||||
|
||||
func main() {
|
||||
configFile := flag.String("config", "", "path to config")
|
||||
configDir := flag.String("config-dir", "", "path to config directory")
|
||||
configFile = flag.String("config", "", "path to config")
|
||||
configDir = flag.String("config-dir", "", "path to config directory")
|
||||
versionFlag := flag.Bool("version", false, "frostfs-ir node version")
|
||||
flag.Parse()
|
||||
|
||||
|
@ -45,101 +57,58 @@ func main() {
|
|||
os.Exit(SuccessReturnCode)
|
||||
}
|
||||
|
||||
cfg, err := newConfig(*configFile, *configDir)
|
||||
var err error
|
||||
cfg, err = newConfig()
|
||||
exitErr(err)
|
||||
|
||||
var logPrm logger.Prm
|
||||
|
||||
err = logPrm.SetLevelString(
|
||||
cfg.GetString("logger.level"),
|
||||
)
|
||||
exitErr(err)
|
||||
|
||||
log, err := logger.NewLogger(&logPrm)
|
||||
log, err = logger.NewLogger(logPrm)
|
||||
exitErr(err)
|
||||
|
||||
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
||||
defer cancel()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
intErr := make(chan error) // internal inner ring errors
|
||||
pprofCmp = newPprofComponent()
|
||||
pprofCmp.init()
|
||||
|
||||
httpServers := initHTTPServers(cfg, log)
|
||||
metricsCmp = newMetricsComponent()
|
||||
metricsCmp.init()
|
||||
|
||||
innerRing, err := innerring.New(ctx, log, cfg, intErr)
|
||||
innerRing, err = innerring.New(ctx, log, cfg, intErr)
|
||||
exitErr(err)
|
||||
|
||||
// start HTTP servers
|
||||
for i := range httpServers {
|
||||
srv := httpServers[i]
|
||||
go func() {
|
||||
exitErr(srv.Serve())
|
||||
}()
|
||||
}
|
||||
pprofCmp.start()
|
||||
metricsCmp.start()
|
||||
|
||||
// start inner ring
|
||||
err = innerRing.Start(ctx, intErr)
|
||||
exitErr(err)
|
||||
|
||||
log.Info("application started",
|
||||
log.Info(logs.CommonApplicationStarted,
|
||||
zap.String("version", misc.Version))
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case err := <-intErr:
|
||||
log.Info("internal error", zap.String("msg", err.Error()))
|
||||
}
|
||||
watchForSignal(cancel)
|
||||
|
||||
innerRing.Stop()
|
||||
<-ctx.Done() // graceful shutdown
|
||||
log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
|
||||
wg.Wait()
|
||||
|
||||
// shut down HTTP servers
|
||||
for i := range httpServers {
|
||||
srv := httpServers[i]
|
||||
|
||||
go func() {
|
||||
err := srv.Shutdown()
|
||||
if err != nil {
|
||||
log.Debug("could not shutdown HTTP server",
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
log.Info("application stopped")
|
||||
log.Info(logs.FrostFSIRApplicationStopped)
|
||||
}
|
||||
|
||||
func initHTTPServers(cfg *viper.Viper, log *logger.Logger) []*httputil.Server {
|
||||
items := []struct {
|
||||
cfgPrefix string
|
||||
handler func() http.Handler
|
||||
}{
|
||||
{"pprof", httputil.Handler},
|
||||
{"prometheus", promhttp.Handler},
|
||||
}
|
||||
|
||||
httpServers := make([]*httputil.Server, 0, len(items))
|
||||
|
||||
for _, item := range items {
|
||||
if !cfg.GetBool(item.cfgPrefix + ".enabled") {
|
||||
log.Info(item.cfgPrefix + " is disabled, skip")
|
||||
continue
|
||||
}
|
||||
|
||||
addr := cfg.GetString(item.cfgPrefix + ".address")
|
||||
|
||||
var prm httputil.HTTPSrvPrm
|
||||
|
||||
prm.Address = addr
|
||||
prm.Handler = item.handler()
|
||||
|
||||
httpServers = append(httpServers,
|
||||
httputil.New(prm,
|
||||
httputil.WithShutdownTimeout(
|
||||
cfg.GetDuration(item.cfgPrefix+".shutdown_timeout"),
|
||||
),
|
||||
),
|
||||
func shutdown() {
|
||||
innerRing.Stop()
|
||||
if err := metricsCmp.shutdown(); err != nil {
|
||||
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
if err := pprofCmp.shutdown(); err != nil {
|
||||
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
||||
return httpServers
|
||||
}
|
||||
|
|
12 cmd/frostfs-ir/metrics.go Normal file

@@ -0,0 +1,12 @@
package main

import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)

func newMetricsComponent() *httpComponent {
    return &httpComponent{
        name:    "prometheus",
        handler: metrics.Handler(),
    }
}
68 cmd/frostfs-ir/pprof.go Normal file

@@ -0,0 +1,68 @@
package main

import (
    "fmt"
    "runtime"

    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
    "go.uber.org/zap"
)

type pprofComponent struct {
    httpComponent
    blockRate int
    mutexRate int
}

const (
    pprofBlockRateKey = "pprof.block_rate"
    pprofMutexRateKey = "pprof.mutex_rate"
)

func newPprofComponent() *pprofComponent {
    return &pprofComponent{
        httpComponent: httpComponent{
            name:    "pprof",
            handler: httputil.Handler(),
        },
    }
}

func (c *pprofComponent) init() {
    c.httpComponent.init()

    if c.enabled {
        c.blockRate = cfg.GetInt(pprofBlockRateKey)
        c.mutexRate = cfg.GetInt(pprofMutexRateKey)
        runtime.SetBlockProfileRate(c.blockRate)
        runtime.SetMutexProfileFraction(c.mutexRate)
    } else {
        c.blockRate = 0
        c.mutexRate = 0
        runtime.SetBlockProfileRate(0)
        runtime.SetMutexProfileFraction(0)
    }
}

func (c *pprofComponent) needReload() bool {
    blockRate := cfg.GetInt(pprofBlockRateKey)
    mutexRate := cfg.GetInt(pprofMutexRateKey)
    return c.httpComponent.needReload() ||
        c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}

func (c *pprofComponent) reload() {
    log.Info(fmt.Sprintf("reload %s", c.name))
    if c.needReload() {
        log.Info(fmt.Sprintf("%s config updated", c.name))
        if err := c.shutdown(); err != nil {
            log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
                zap.String("error", err.Error()))
            return
        }

        c.init()
        c.start()
    }
}
@@ -36,7 +36,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
    storageID := meta.StorageIDPrm{}
    storageID.SetAddress(addr)

    resStorageID, err := db.StorageID(storageID)
    resStorageID, err := db.StorageID(cmd.Context(), storageID)
    common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err))

    if id := resStorageID.StorageID(); id != nil {

@@ -51,7 +51,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {

    siErr := new(object.SplitInfoError)

    res, err := db.Get(prm)
    res, err := db.Get(cmd.Context(), prm)
    if errors.As(err, &siErr) {
        link, linkSet := siErr.SplitInfo().Link()
        last, lastSet := siErr.SplitInfo().LastPart()
@@ -24,6 +24,61 @@ type valueWithTime[V any] struct {
    e error
}

type locker struct {
    mtx     *sync.Mutex
    waiters int // not protected by mtx, must used outer mutex to update concurrently
}

type keyLocker[K comparable] struct {
    lockers    map[K]*locker
    lockersMtx *sync.Mutex
}

func newKeyLocker[K comparable]() *keyLocker[K] {
    return &keyLocker[K]{
        lockers:    make(map[K]*locker),
        lockersMtx: &sync.Mutex{},
    }
}

func (l *keyLocker[K]) LockKey(key K) {
    l.lockersMtx.Lock()

    if locker, found := l.lockers[key]; found {
        locker.waiters++
        l.lockersMtx.Unlock()

        locker.mtx.Lock()
        return
    }

    locker := &locker{
        mtx:     &sync.Mutex{},
        waiters: 1,
    }
    locker.mtx.Lock()

    l.lockers[key] = locker
    l.lockersMtx.Unlock()
}

func (l *keyLocker[K]) UnlockKey(key K) {
    l.lockersMtx.Lock()
    defer l.lockersMtx.Unlock()

    locker, found := l.lockers[key]
    if !found {
        return
    }

    if locker.waiters == 1 {
        delete(l.lockers, key)
    }
    locker.waiters--

    locker.mtx.Unlock()
}

// entity that provides TTL cache interface.
type ttlNetCache[K comparable, V any] struct {
    ttl time.Duration

@@ -33,6 +88,8 @@ type ttlNetCache[K comparable, V any] struct {
    cache *lru.Cache[K, *valueWithTime[V]]

    netRdr netValueReader[K, V]

    keyLocker *keyLocker[K]
}

// complicates netValueReader with TTL caching mechanism.

@@ -41,10 +98,11 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
    fatalOnErr(err)

    return &ttlNetCache[K, V]{
        ttl:    ttl,
        sz:     sz,
        cache:  cache,
        netRdr: netRdr,
        ttl:       ttl,
        sz:        sz,
        cache:     cache,
        netRdr:    netRdr,
        keyLocker: newKeyLocker[K](),
    }
}

@@ -55,22 +113,33 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// returned value should not be modified.
func (c *ttlNetCache[K, V]) get(key K) (V, error) {
    val, ok := c.cache.Peek(key)
    if ok {
        if time.Since(val.t) < c.ttl {
            return val.v, val.e
        }
    if ok && time.Since(val.t) < c.ttl {
        return val.v, val.e
    }

    c.cache.Remove(key)
    c.keyLocker.LockKey(key)
    defer c.keyLocker.UnlockKey(key)

    val, ok = c.cache.Peek(key)
    if ok && time.Since(val.t) < c.ttl {
        return val.v, val.e
    }

    v, err := c.netRdr(key)

    c.set(key, v, err)
    c.cache.Add(key, &valueWithTime[V]{
        v: v,
        t: time.Now(),
        e: err,
    })

    return v, err
}

func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
    c.keyLocker.LockKey(k)
    defer c.keyLocker.UnlockKey(k)

    c.cache.Add(k, &valueWithTime[V]{
        v: v,
        t: time.Now(),

@@ -79,6 +148,9 @@ func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
}

func (c *ttlNetCache[K, V]) remove(key K) {
    c.keyLocker.LockKey(key)
    defer c.keyLocker.UnlockKey(key)

    c.cache.Remove(key)
}
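The behavioural change here is that a cache miss no longer lets every caller hit the network for the same key at once: the first caller takes the per-key lock, re-checks the cache, and only then calls `netRdr`, while later callers block on the same key and find the freshly cached value. A minimal, self-contained sketch of that double-checked pattern using only a plain map and mutexes; the `fetch` callback and key type are illustrative, not the node's actual types, and unlike the `keyLocker` above this sketch never removes idle per-key locks:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type entry struct {
        v string
        t time.Time
    }

    type cache struct {
        mu   sync.Mutex
        data map[string]entry
        keys map[string]*sync.Mutex // one lock per key being fetched
        ttl  time.Duration
    }

    // get re-checks the cache after acquiring the per-key lock, so concurrent
    // callers for the same key trigger at most one fetch per TTL window.
    func (c *cache) get(key string, fetch func(string) string) string {
        c.mu.Lock()
        if e, ok := c.data[key]; ok && time.Since(e.t) < c.ttl {
            c.mu.Unlock()
            return e.v
        }
        kl, ok := c.keys[key]
        if !ok {
            kl = &sync.Mutex{}
            c.keys[key] = kl
        }
        c.mu.Unlock()

        kl.Lock()
        defer kl.Unlock()

        c.mu.Lock()
        if e, ok := c.data[key]; ok && time.Since(e.t) < c.ttl {
            c.mu.Unlock()
            return e.v
        }
        c.mu.Unlock()

        v := fetch(key) // only one goroutine per key reaches this point at a time
        c.mu.Lock()
        c.data[key] = entry{v: v, t: time.Now()}
        c.mu.Unlock()
        return v
    }

    func main() {
        c := &cache{data: map[string]entry{}, keys: map[string]*sync.Mutex{}, ttl: time.Second}
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                fmt.Println(c.get("container-1", func(k string) string { return "value-for-" + k }))
            }()
        }
        wg.Wait()
    }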
32 cmd/frostfs-node/cache_test.go Normal file

@@ -0,0 +1,32 @@
package main

import (
    "context"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "golang.org/x/sync/errgroup"
)

func TestKeyLocker(t *testing.T) {
    taken := false
    eg, _ := errgroup.WithContext(context.Background())
    keyLocker := newKeyLocker[int]()
    for i := 0; i < 100; i++ {
        eg.Go(func() error {
            keyLocker.LockKey(0)
            defer keyLocker.UnlockKey(0)

            require.False(t, taken)
            taken = true
            require.True(t, taken)
            time.Sleep(10 * time.Millisecond)
            taken = false
            require.False(t, taken)

            return nil
        })
    }
    require.NoError(t, eg.Wait())
}
|
@ -29,6 +29,7 @@ import (
|
|||
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
|
||||
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
|
||||
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
|
||||
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
|
||||
|
@ -54,8 +55,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone"
|
||||
tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
|
||||
trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
|
||||
truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
|
||||
|
@ -342,13 +341,13 @@ type internals struct {
|
|||
func (c *cfg) startMaintenance() {
|
||||
c.isMaintenance.Store(true)
|
||||
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
|
||||
c.log.Info("started local node's maintenance")
|
||||
c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
|
||||
}
|
||||
|
||||
// stops node's maintenance.
|
||||
func (c *internals) stopMaintenance() {
|
||||
c.isMaintenance.Store(false)
|
||||
c.log.Info("stopped local node's maintenance")
|
||||
c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
|
||||
}
|
||||
|
||||
// IsMaintenance checks if storage node is under maintenance.
|
||||
|
@ -413,7 +412,6 @@ type cfg struct {
|
|||
cfgNodeInfo cfgNodeInfo
|
||||
cfgNetmap cfgNetmap
|
||||
cfgControlService cfgControlService
|
||||
cfgReputation cfgReputation
|
||||
cfgObject cfgObject
|
||||
cfgNotifications cfgNotifications
|
||||
}
|
||||
|
@ -451,8 +449,6 @@ type cfgMorph struct {
|
|||
// TTL of Sidechain cached values. Non-positive value disables caching.
|
||||
cacheTTL time.Duration
|
||||
|
||||
eigenTrustTicker *eigenTrustTickers // timers for EigenTrust iterations
|
||||
|
||||
proxyScriptHash neogoutil.Uint160
|
||||
}
|
||||
|
||||
|
@ -531,16 +527,6 @@ type cfgControlService struct {
|
|||
server *grpc.Server
|
||||
}
|
||||
|
||||
type cfgReputation struct {
|
||||
workerPool util.WorkerPool // pool for EigenTrust algorithm's iterations
|
||||
|
||||
localTrustStorage *truststorage.Storage
|
||||
|
||||
localTrustCtrl *trustcontroller.Controller
|
||||
|
||||
scriptHash neogoutil.Uint160
|
||||
}
|
||||
|
||||
var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block")
|
||||
|
||||
func initCfg(appCfg *config.Config) *cfg {
|
||||
|
@ -581,8 +567,6 @@ func initCfg(appCfg *config.Config) *cfg {
|
|||
}
|
||||
c.cfgObject = initCfgObject(appCfg)
|
||||
|
||||
c.cfgReputation = initReputation(appCfg)
|
||||
|
||||
user.IDFromKey(&c.ownerIDFromKey, key.PrivateKey.PublicKey)
|
||||
|
||||
c.metricsCollector = metrics.NewNodeMetrics()
|
||||
|
@ -661,16 +645,6 @@ func initContainer(appCfg *config.Config) cfgContainer {
|
|||
}
|
||||
}
|
||||
|
||||
func initReputation(appCfg *config.Config) cfgReputation {
|
||||
reputationWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
|
||||
fatalOnErr(err)
|
||||
|
||||
return cfgReputation{
|
||||
scriptHash: contractsconfig.Reputation(appCfg),
|
||||
workerPool: reputationWorkerPool,
|
||||
}
|
||||
}
|
||||
|
||||
func initCfgGRPC() cfgGRPC {
|
||||
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
|
||||
maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
|
||||
|
@ -881,10 +855,10 @@ func initLocalStorage(c *cfg) {
|
|||
for _, optsWithMeta := range c.shardOpts() {
|
||||
id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...)
|
||||
if err != nil {
|
||||
c.log.Error("failed to attach shard to engine", zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
|
||||
} else {
|
||||
shardsAttached++
|
||||
c.log.Info("shard attached to engine", zap.Stringer("id", id))
|
||||
c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
|
||||
}
|
||||
}
|
||||
if shardsAttached == 0 {
|
||||
|
@ -894,15 +868,15 @@ func initLocalStorage(c *cfg) {
|
|||
c.cfgObject.cfgLocalStorage.localStorage = ls
|
||||
|
||||
c.onShutdown(func() {
|
||||
c.log.Info("closing components of the storage engine...")
|
||||
c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
|
||||
|
||||
err := ls.Close()
|
||||
if err != nil {
|
||||
c.log.Info("storage engine closing failure",
|
||||
c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
} else {
|
||||
c.log.Info("all components of the storage engine closed successfully")
|
||||
c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -976,11 +950,11 @@ func (c *cfg) bootstrap() error {
|
|||
// switch to online except when under maintenance
|
||||
st := c.cfgNetmap.state.controlNetmapStatus()
|
||||
if st == control.NetmapStatus_MAINTENANCE {
|
||||
c.log.Info("bootstrapping with the maintenance state")
|
||||
c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
|
||||
return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
|
||||
}
|
||||
|
||||
c.log.Info("bootstrapping with online state",
|
||||
c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
|
||||
zap.Stringer("previous", st),
|
||||
)
|
||||
|
||||
|
@ -1015,32 +989,32 @@ func (c *cfg) signalWatcher(ctx context.Context) {
|
|||
case syscall.SIGHUP:
|
||||
c.reloadConfig(ctx)
|
||||
case syscall.SIGTERM, syscall.SIGINT:
|
||||
c.log.Info("termination signal has been received, stopping...")
|
||||
c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
|
||||
// TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)
|
||||
|
||||
c.shutdown()
|
||||
|
||||
c.log.Info("termination signal processing is complete")
|
||||
c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
|
||||
return
|
||||
}
|
||||
case err := <-c.internalErr: // internal application error
|
||||
c.log.Warn("internal application error",
|
||||
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
|
||||
zap.String("message", err.Error()))
|
||||
|
||||
c.shutdown()
|
||||
|
||||
c.log.Info("internal error processing is complete")
|
||||
c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cfg) reloadConfig(ctx context.Context) {
|
||||
c.log.Info("SIGHUP has been received, rereading configuration...")
|
||||
c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
|
||||
|
||||
err := c.readConfig(c.appCfg)
|
||||
if err != nil {
|
||||
c.log.Error("configuration reading", zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1052,7 +1026,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
|
|||
|
||||
logPrm, err := c.loggerPrm()
|
||||
if err != nil {
|
||||
c.log.Error("logger configuration preparation", zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1060,7 +1034,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
|
|||
components = append(components, dCmp{"tracing", func() error {
|
||||
updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
|
||||
if updated {
|
||||
c.log.Info("tracing configation updated")
|
||||
c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
|
||||
}
|
||||
return err
|
||||
}})
|
||||
|
@ -1085,20 +1059,20 @@ func (c *cfg) reloadConfig(ctx context.Context) {
|
|||
|
||||
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
|
||||
if err != nil {
|
||||
c.log.Error("storage engine configuration update", zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
for _, component := range components {
|
||||
err = component.reloadFunc()
|
||||
if err != nil {
|
||||
c.log.Error("updated configuration applying",
|
||||
c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
|
||||
zap.String("component", component.name),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
c.log.Info("configuration has been reloaded successfully")
|
||||
c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
|
||||
}
|
||||
|
||||
func (c *cfg) shutdown() {
|
||||
|
|
|
@@ -2,6 +2,8 @@ package config

import (
	"strings"

	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
)

// Sub returns a subsection of the Config by name.
@@ -38,11 +40,11 @@ func (x *Config) Sub(name string) *Config {
//
// Returns nil if config is nil.
func (x *Config) Value(name string) any {
	value := x.v.Get(strings.Join(append(x.path, name), separator))
	value := x.v.Get(strings.Join(append(x.path, name), configViper.Separator))
	if value != nil || x.defaultPath == nil {
		return value
	}
	return x.v.Get(strings.Join(append(x.defaultPath, name), separator))
	return x.v.Get(strings.Join(append(x.defaultPath, name), configViper.Separator))
}

// SetDefault sets fallback config for missing values.
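Config.Value above resolves a key under the section's own path first and falls back to the default path registered via SetDefault. A minimal standalone illustration of that two-step lookup using plain viper (the keys and values are made up; the real Config keeps the paths internally):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

// lookup mirrors the order used by Config.Value: the key is resolved
// under the section's own path first, then under the default path.
func lookup(v *viper.Viper, path, defaultPath []string, name string) any {
	value := v.Get(strings.Join(append(path, name), "."))
	if value != nil || defaultPath == nil {
		return value
	}
	return v.Get(strings.Join(append(defaultPath, name), "."))
}

func main() {
	v := viper.New()
	v.Set("logger.level", "debug")           // explicit value
	v.Set("logger_default.encoding", "json") // only present in the fallback tree

	fmt.Println(lookup(v, []string{"logger"}, []string{"logger_default"}, "level"))    // debug
	fmt.Println(lookup(v, []string{"logger"}, []string{"logger_default"}, "encoding")) // json
}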
@@ -2,11 +2,12 @@ package config_test

import (
	"os"
	"strings"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/internal"
	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
	"github.com/stretchr/testify/require"
)

@@ -35,7 +36,9 @@ func TestConfigEnv(t *testing.T) {
		value = "some value"
	)

	err := os.Setenv(internal.Env(section, name), value)
	envName := strings.ToUpper(
		strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
	err := os.Setenv(envName, value)
	require.NoError(t, err)

	c := configtest.EmptyConfig()
@@ -1,11 +1,7 @@
package config

import (
	"fmt"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/internal"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
	"github.com/spf13/viper"
)

@@ -18,69 +14,47 @@ import (
type Config struct {
	v *viper.Viper

	opts opts
	configFile string
	configDir  string
	envPrefix  string

	defaultPath []string
	path        []string
}

const separator = "."

// Prm groups required parameters of the Config.
type Prm struct{}
const (
	// EnvPrefix is a prefix of ENV variables related
	// to storage node configuration.
	EnvPrefix = "FROSTFS"
)

// New creates a new Config instance.
//
// If file option is provided (WithConfigFile),
// If file option is provided,
// configuration values are read from it.
// Otherwise, Config is a degenerate tree.
func New(_ Prm, opts ...Option) *Config {
	v := viper.New()
func New(configFile, configDir, envPrefix string) *Config {
	v, err := configViper.CreateViper(
		configViper.WithConfigFile(configFile),
		configViper.WithConfigDir(configDir),
		configViper.WithEnvPrefix(envPrefix))

	v.SetEnvPrefix(internal.EnvPrefix)
	v.AutomaticEnv()
	v.SetEnvKeyReplacer(strings.NewReplacer(separator, internal.EnvSeparator))

	o := defaultOpts()
	for i := range opts {
		opts[i](o)
	}

	if o.path != "" {
		v.SetConfigFile(o.path)

		err := v.ReadInConfig()
		if err != nil {
			panic(fmt.Errorf("failed to read config: %w", err))
		}
	}

	if o.configDir != "" {
		if err := config.ReadConfigDir(v, o.configDir); err != nil {
			panic(fmt.Errorf("failed to read config dir: %w", err))
		}
	if err != nil {
		panic(err)
	}

	return &Config{
		v:    v,
		opts: *o,
		v:          v,
		configFile: configFile,
		configDir:  configDir,
		envPrefix:  envPrefix,
	}
}

// Reload reads configuration path if it was provided to New.
func (x *Config) Reload() error {
	if x.opts.path != "" {
		err := x.v.ReadInConfig()
		if err != nil {
			return fmt.Errorf("rereading configuration file: %w", err)
		}
	}

	if x.opts.configDir != "" {
		if err := config.ReadConfigDir(x.v, x.opts.configDir); err != nil {
			return fmt.Errorf("rereading configuration dir: %w", err)
		}
	}

	return nil
	return configViper.ReloadViper(
		configViper.WithViper(x.v), configViper.WithConfigFile(x.configFile),
		configViper.WithConfigDir(x.configDir),
		configViper.WithEnvPrefix(x.envPrefix))
}
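A rough usage sketch of the reworked constructor and Reload shown above (the paths are placeholders; the viper wiring now happens inside configViper.CreateViper and ReloadViper):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)

func main() {
	// Placeholder paths: a single config file plus a directory of overrides.
	appCfg := config.New("/etc/frostfs/node.yaml", "/etc/frostfs/node.d", config.EnvPrefix)

	fmt.Println(appCfg.Sub("logger").Value("level"))

	// Re-read the same file/dir/ENV sources on SIGHUP, mirroring reloadConfig above.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)
	for range ch {
		if err := appCfg.Reload(); err != nil {
			fmt.Fprintln(os.Stderr, "reload:", err)
		}
	}
}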
24
cmd/frostfs-node/config/configdir_test.go
Normal file
@@ -0,0 +1,24 @@
package config

import (
	"os"
	"path"
	"testing"

	"github.com/spf13/cast"
	"github.com/stretchr/testify/require"
)

func TestConfigDir(t *testing.T) {
	dir := t.TempDir()

	cfgFileName0 := path.Join(dir, "cfg_00.json")
	cfgFileName1 := path.Join(dir, "cfg_01.yml")

	require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0777))
	require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0777))

	c := New("", dir, "")
	require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
	require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
}
@@ -38,15 +38,6 @@ func Container(c *config.Config) util.Uint160 {
	return contractAddress(c, "container")
}

// Reputation returnsthe value of "reputation" config parameter
// from "contracts" section.
//
// Returns zero filled script hash if the value is not set.
// Throws panic if the value is not a 20-byte LE hex-encoded string.
func Reputation(c *config.Config) util.Uint160 {
	return contractAddress(c, "reputation")
}

// Proxy returnsthe value of "proxy" config parameter
// from "contracts" section.
//

@@ -18,7 +18,6 @@ func TestContractsSection(t *testing.T) {
		require.Equal(t, emptyHash, contractsconfig.Balance(empty))
		require.Equal(t, emptyHash, contractsconfig.Container(empty))
		require.Equal(t, emptyHash, contractsconfig.Netmap(empty))
		require.Equal(t, emptyHash, contractsconfig.Reputation(empty))
		require.Equal(t, emptyHash, contractsconfig.Proxy(empty))
	})

@@ -33,9 +32,6 @@ func TestContractsSection(t *testing.T) {
		expNetmap, err := util.Uint160DecodeStringLE("0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca")
		require.NoError(t, err)

		expReputation, err := util.Uint160DecodeStringLE("441995f631c1da2b133462b71859494a5cd45e90")
		require.NoError(t, err)

		expProxy, err := util.Uint160DecodeStringLE("ad7c6b55b737b696e5c82c85445040964a03e97f")
		require.NoError(t, err)

@@ -43,13 +39,11 @@ func TestContractsSection(t *testing.T) {
		balance := contractsconfig.Balance(c)
		container := contractsconfig.Container(c)
		netmap := contractsconfig.Netmap(c)
		reputation := contractsconfig.Reputation(c)
		proxy := contractsconfig.Proxy(c)

		require.Equal(t, expBalance, balance)
		require.Equal(t, expConatiner, container)
		require.Equal(t, expNetmap, netmap)
		require.Equal(t, expReputation, reputation)
		require.Equal(t, expProxy, proxy)
	}
@@ -1,22 +0,0 @@
package internal

import (
	"strings"
)

// EnvPrefix is a prefix of ENV variables related
// to storage node configuration.
const EnvPrefix = "FROSTFS"

// EnvSeparator is a section separator in ENV variables.
const EnvSeparator = "_"

// Env returns ENV variable key for a particular config parameter.
func Env(path ...string) string {
	return strings.ToUpper(
		strings.Join(
			append([]string{EnvPrefix}, path...),
			EnvSeparator,
		),
	)
}
@@ -1,15 +0,0 @@
package internal_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/internal"
	"github.com/stretchr/testify/require"
)

func TestEnv(t *testing.T) {
	require.Equal(t,
		"FROSTFS_SECTION_PARAMETER",
		internal.Env("section", "parameter"),
	)
}
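With the internal env helper gone, the ENV key derivation is spelled out at the call sites, as in the test change above. A minimal sketch of the same mapping (assuming the separator stays "_", matching the removed EnvSeparator constant):

package main

import (
	"fmt"
	"strings"
)

// envKey reproduces the mapping of a config path to its ENV variable name:
// "<PREFIX>_<SECTION>_<PARAMETER>", upper-cased, parts joined with "_".
func envKey(prefix string, path ...string) string {
	return strings.ToUpper(strings.Join(append([]string{prefix}, path...), "_"))
}

func main() {
	fmt.Println(envKey("FROSTFS", "section", "parameter")) // FROSTFS_SECTION_PARAMETER
}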
@ -177,33 +177,6 @@ func (p PersistentStateConfig) Path() string {
|
|||
return PersistentStatePathDefault
|
||||
}
|
||||
|
||||
// SubnetConfig represents node configuration related to subnets.
|
||||
type SubnetConfig config.Config
|
||||
|
||||
// Init initializes SubnetConfig from "subnet" sub-section of "node" section
|
||||
// of the root config.
|
||||
func (x *SubnetConfig) Init(root config.Config) {
|
||||
*x = SubnetConfig(*root.Sub(subsection).Sub("subnet"))
|
||||
}
|
||||
|
||||
// ExitZero returns the value of "exit_zero" config parameter as bool.
|
||||
// Returns false if the value can not be cast.
|
||||
func (x SubnetConfig) ExitZero() bool {
|
||||
return config.BoolSafe((*config.Config)(&x), "exit_zero")
|
||||
}
|
||||
|
||||
// IterateSubnets casts the value of "entries" config parameter to string slice,
|
||||
// iterates over all of its elements and passes them to f.
|
||||
//
|
||||
// Does nothing if the value can not be cast to string slice.
|
||||
func (x SubnetConfig) IterateSubnets(f func(string)) {
|
||||
ids := config.StringSliceSafe((*config.Config)(&x), "entries")
|
||||
|
||||
for i := range ids {
|
||||
f(ids[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Notification returns structure that provides access to "notification"
|
||||
// subsection of "node" section.
|
||||
func Notification(c *config.Config) NotificationConfig {
|
||||
|
|
|
@ -52,20 +52,6 @@ func TestNodeSection(t *testing.T) {
|
|||
require.Equal(t, "", notificationDefaultCertPath)
|
||||
require.Equal(t, "", notificationDefaultKeyPath)
|
||||
require.Equal(t, "", notificationDefaultCAPath)
|
||||
|
||||
var subnetCfg SubnetConfig
|
||||
|
||||
subnetCfg.Init(*empty)
|
||||
|
||||
require.False(t, subnetCfg.ExitZero())
|
||||
|
||||
called := false
|
||||
|
||||
subnetCfg.IterateSubnets(func(string) {
|
||||
called = true
|
||||
})
|
||||
|
||||
require.False(t, called)
|
||||
})
|
||||
|
||||
const path = "../../../../config/example/node"
|
||||
|
@ -143,20 +129,6 @@ func TestNodeSection(t *testing.T) {
|
|||
require.Equal(t, "/cert/path", notificationCertPath)
|
||||
require.Equal(t, "/key/path", notificationKeyPath)
|
||||
require.Equal(t, "/ca/path", notificationCAPath)
|
||||
|
||||
var subnetCfg SubnetConfig
|
||||
|
||||
subnetCfg.Init(*c)
|
||||
|
||||
require.True(t, subnetCfg.ExitZero())
|
||||
|
||||
var ids []string
|
||||
|
||||
subnetCfg.IterateSubnets(func(id string) {
|
||||
ids = append(ids, id)
|
||||
})
|
||||
|
||||
require.Equal(t, []string{"123", "456", "789"}, ids)
|
||||
}
|
||||
|
||||
configtest.ForEachFileType(path, fileConfigTest)
|
||||
|
|
|
@@ -51,3 +51,27 @@ func Address(c *config.Config) string {

	return AddressDefault
}

// BlockRates returns the value of "block_rate" config parameter
// from "pprof" section.
func BlockRate(c *config.Config) int {
	s := c.Sub(subsection)

	v := int(config.IntSafe(s, "block_rate"))
	if v <= 0 {
		return 0
	}
	return v
}

// MutexRate returns the value of "mutex_rate" config parameter
// from "pprof" section.
func MutexRate(c *config.Config) int {
	s := c.Sub(subsection)

	v := int(config.IntSafe(s, "mutex_rate"))
	if v <= 0 {
		return 0
	}
	return v
}

@@ -18,6 +18,9 @@ func TestProfilerSection(t *testing.T) {
		require.Equal(t, profilerconfig.ShutdownTimeoutDefault, to)
		require.Equal(t, profilerconfig.AddressDefault, addr)
		require.False(t, profilerconfig.Enabled(configtest.EmptyConfig()))

		require.Zero(t, profilerconfig.BlockRate(configtest.EmptyConfig()))
		require.Zero(t, profilerconfig.MutexRate(configtest.EmptyConfig()))
	})

	const path = "../../../../config/example/node"

@@ -29,6 +32,9 @@ func TestProfilerSection(t *testing.T) {
		require.Equal(t, 15*time.Second, to)
		require.Equal(t, "localhost:6060", addr)
		require.True(t, profilerconfig.Enabled(c))

		require.Equal(t, 10_000, profilerconfig.BlockRate(c))
		require.Equal(t, 10_000, profilerconfig.MutexRate(c))
	}

	configtest.ForEachFileType(path, fileConfigTest)
@@ -11,21 +11,15 @@ import (
)

func fromFile(path string) *config.Config {
	var p config.Prm

	os.Clearenv() // ENVs have priority over config files, so we do this in tests

	return config.New(p,
		config.WithConfigFile(path),
	)
	return config.New(path, "", "")
}

func fromEnvFile(t testing.TB, path string) *config.Config {
	var p config.Prm

	loadEnv(t, path) // github.com/joho/godotenv can do that as well

	return config.New(p)
	return config.New("", "", config.EnvPrefix)
}

func forEachFile(paths []string, f func(*config.Config)) {

@@ -51,9 +45,7 @@ func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {

// EmptyConfig returns config without any values and sections.
func EmptyConfig() *config.Config {
	var p config.Prm

	return config.New(p)
	return config.New("", "", config.EnvPrefix)
}

// loadEnv reads .env file, parses `X=Y` records and sets OS ENVs.
@ -11,6 +11,7 @@ import (
|
|||
|
||||
containerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
|
||||
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
|
||||
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
|
@ -130,19 +131,20 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
|
|||
// TODO: use owner directly from the event after neofs-contract#256 will become resolved
|
||||
// but don't forget about the profit of reading the new container and caching it:
|
||||
// creation success are most commonly tracked by polling GET op.
|
||||
cnr, err := cachedContainerStorage.Get(ev.ID)
|
||||
cnr, err := cnrSrc.Get(ev.ID)
|
||||
if err == nil {
|
||||
cachedContainerLister.update(cnr.Value.Owner(), ev.ID, true)
|
||||
cachedContainerStorage.set(ev.ID, cnr, nil)
|
||||
} else {
|
||||
// unlike removal, we expect successful receive of the container
|
||||
// after successful creation, so logging can be useful
|
||||
c.log.Error("read newly created container after the notification",
|
||||
c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
|
||||
zap.Stringer("id", ev.ID),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
|
||||
c.log.Debug("container creation event's receipt",
|
||||
c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
|
||||
zap.Stringer("id", ev.ID),
|
||||
)
|
||||
})
|
||||
|
@ -161,7 +163,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
|
|||
|
||||
cachedContainerStorage.handleRemoval(ev.ID)
|
||||
|
||||
c.log.Debug("container removal event's receipt",
|
||||
c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
|
||||
zap.Stringer("id", ev.ID),
|
||||
)
|
||||
})
|
||||
|
@ -295,7 +297,7 @@ type morphLoadWriter struct {
|
|||
}
|
||||
|
||||
func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error {
|
||||
w.log.Debug("save used space announcement in contract",
|
||||
w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract,
|
||||
zap.Uint64("epoch", a.Epoch()),
|
||||
zap.Stringer("cid", a.Container()),
|
||||
zap.Uint64("size", a.Value()),
|
||||
|
@ -329,7 +331,7 @@ type remoteLoadAnnounceProvider struct {
|
|||
netmapKeys netmapCore.AnnouncedKeys
|
||||
|
||||
clientCache interface {
|
||||
Get(client.NodeInfo) (client.Client, error)
|
||||
Get(client.NodeInfo) (client.MultiAddressClient, error)
|
||||
}
|
||||
|
||||
deadEndProvider loadcontroller.WriterProvider
|
||||
|
@ -458,7 +460,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
|
|||
for i := range idList {
|
||||
sz, err := engine.ContainerSize(d.engine, idList[i])
|
||||
if err != nil {
|
||||
d.log.Debug("failed to calculate container size in storage engine",
|
||||
d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine,
|
||||
zap.Stringer("cid", idList[i]),
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
|
@ -466,7 +468,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
|
|||
continue
|
||||
}
|
||||
|
||||
d.log.Debug("container size in storage engine calculated successfully",
|
||||
d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully,
|
||||
zap.Uint64("size", sz),
|
||||
zap.Stringer("cid", idList[i]),
|
||||
)
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"net"
|
||||
|
||||
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
|
||||
|
@ -52,7 +53,7 @@ func initControlService(c *cfg) {
|
|||
|
||||
lis, err := net.Listen("tcp", endpoint)
|
||||
if err != nil {
|
||||
c.log.Error("can't listen gRPC endpoint (control)", zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
|
||||
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
|
@ -33,7 +34,7 @@ func initGRPC(c *cfg) {
|
|||
if tlsCfg != nil {
|
||||
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
|
||||
if err != nil {
|
||||
c.log.Error("could not read certificate from file", zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -63,7 +64,7 @@ func initGRPC(c *cfg) {
|
|||
|
||||
lis, err := net.Listen("tcp", sc.Endpoint())
|
||||
if err != nil {
|
||||
c.log.Error("can't listen gRPC endpoint", zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -93,14 +94,14 @@ func serveGRPC(c *cfg) {
|
|||
|
||||
go func() {
|
||||
defer func() {
|
||||
c.log.Info("stop listening gRPC endpoint",
|
||||
c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
|
||||
zap.String("endpoint", lis.Addr().String()),
|
||||
)
|
||||
|
||||
c.wg.Done()
|
||||
}()
|
||||
|
||||
c.log.Info("start listening gRPC endpoint",
|
||||
c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint,
|
||||
zap.String("endpoint", lis.Addr().String()),
|
||||
)
|
||||
|
||||
|
@ -114,7 +115,7 @@ func serveGRPC(c *cfg) {
|
|||
func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
|
||||
l = &logger.Logger{Logger: l.With(zap.String("name", name))}
|
||||
|
||||
l.Info("stopping gRPC server...")
|
||||
l.Info(logs.FrostFSNodeStoppingGRPCServer)
|
||||
|
||||
// GracefulStop() may freeze forever, see #1270
|
||||
done := make(chan struct{})
|
||||
|
@ -126,9 +127,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
|
|||
select {
|
||||
case <-done:
|
||||
case <-time.After(1 * time.Minute):
|
||||
l.Info("gRPC cannot shutdown gracefully, forcing stop")
|
||||
l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
|
||||
s.Stop()
|
||||
}
|
||||
|
||||
l.Info("gRPC server stopped successfully")
|
||||
l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"os"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
"go.uber.org/zap"
|
||||
|
@ -45,7 +46,7 @@ func main() {
|
|||
os.Exit(SuccessReturnCode)
|
||||
}
|
||||
|
||||
appCfg := config.New(config.Prm{}, config.WithConfigFile(*configFile), config.WithConfigDir(*configDir))
|
||||
appCfg := config.New(*configFile, *configDir, config.EnvPrefix)
|
||||
|
||||
err := validateConfig(appCfg)
|
||||
fatalOnErr(err)
|
||||
|
@ -82,9 +83,8 @@ func initApp(ctx context.Context, c *cfg) {
|
|||
c.wg.Done()
|
||||
}()
|
||||
|
||||
pprof, _ := pprofComponent(c)
|
||||
metrics, _ := metricsComponent(c)
|
||||
initAndLog(c, pprof.name, pprof.init)
|
||||
initAndLog(c, "profiler", initProfilerService)
|
||||
initAndLog(c, metrics.name, metrics.init)
|
||||
|
||||
initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
|
||||
|
@ -101,7 +101,6 @@ func initApp(ctx context.Context, c *cfg) {
|
|||
initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
|
||||
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
|
||||
initAndLog(c, "session", initSessionService)
|
||||
initAndLog(c, "reputation", func(c *cfg) { initReputationService(ctx, c) })
|
||||
initAndLog(c, "notification", func(c *cfg) { initNotifications(ctx, c) })
|
||||
initAndLog(c, "object", initObjectService)
|
||||
initAndLog(c, "tree", initTreeService)
|
||||
|
@ -142,14 +141,14 @@ func bootUp(ctx context.Context, c *cfg) {
|
|||
}
|
||||
|
||||
func wait(c *cfg, cancel func()) {
|
||||
c.log.Info("application started",
|
||||
c.log.Info(logs.CommonApplicationStarted,
|
||||
zap.String("version", misc.Version))
|
||||
|
||||
<-c.done // graceful shutdown
|
||||
|
||||
cancel()
|
||||
|
||||
c.log.Debug("waiting for all processes to stop")
|
||||
c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
|
||||
|
||||
c.wg.Wait()
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
|
||||
)
|
||||
|
||||
func metricsComponent(c *cfg) (*httpComponent, bool) {
|
||||
|
@ -12,7 +12,7 @@ func metricsComponent(c *cfg) (*httpComponent, bool) {
|
|||
c.dynamicConfiguration.metrics = new(httpComponent)
|
||||
c.dynamicConfiguration.metrics.cfg = c
|
||||
c.dynamicConfiguration.metrics.name = "metrics"
|
||||
c.dynamicConfiguration.metrics.handler = promhttp.Handler()
|
||||
c.dynamicConfiguration.metrics.handler = metrics.Handler()
|
||||
updated = true
|
||||
}
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"time"
|
||||
|
||||
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
|
@ -49,7 +50,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
|
|||
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
|
||||
)
|
||||
if err != nil {
|
||||
c.log.Info("failed to create neo RPC client",
|
||||
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
|
||||
zap.Any("endpoints", addresses),
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
|
@ -58,12 +59,12 @@ func initMorphComponents(ctx context.Context, c *cfg) {
|
|||
}
|
||||
|
||||
c.onShutdown(func() {
|
||||
c.log.Info("closing morph components...")
|
||||
c.log.Info(logs.FrostFSNodeClosingMorphComponents)
|
||||
cli.Close()
|
||||
})
|
||||
|
||||
if err := cli.SetGroupSignerScope(); err != nil {
|
||||
c.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
|
||||
c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
|
||||
}
|
||||
|
||||
c.cfgMorph.client = cli
|
||||
|
@ -80,7 +81,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
|
|||
fatalOnErr(err)
|
||||
}
|
||||
|
||||
c.log.Info("notary support",
|
||||
c.log.Info(logs.FrostFSNodeNotarySupport,
|
||||
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
|
||||
)
|
||||
|
||||
|
@ -95,7 +96,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
|
|||
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
|
||||
fatalOnErr(err)
|
||||
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
|
||||
c.log.Debug("morph.cache_ttl fetched from network", zap.Duration("value", c.cfgMorph.cacheTTL))
|
||||
c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
|
||||
}
|
||||
|
||||
if c.cfgMorph.cacheTTL < 0 {
|
||||
|
@ -122,7 +123,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
|
|||
// non-error deposit with an empty TX hash means
|
||||
// that the deposit has already been made; no
|
||||
// need to wait it.
|
||||
c.log.Info("notary deposit has already been made")
|
||||
c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -190,7 +191,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
|
||||
if err != nil {
|
||||
fromSideChainBlock = 0
|
||||
c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
|
||||
c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||
}
|
||||
|
||||
subs, err = subscriber.New(ctx, &subscriber.Params{
|
||||
|
@ -215,7 +216,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
|
||||
res, err := netmapEvent.ParseNewEpoch(src)
|
||||
if err == nil {
|
||||
c.log.Info("new epoch event from sidechain",
|
||||
c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
|
||||
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
|
||||
)
|
||||
}
|
||||
|
@ -226,16 +227,14 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
|
||||
|
||||
registerBlockHandler(lis, func(block *block.Block) {
|
||||
c.log.Debug("new block", zap.Uint32("index", block.Index))
|
||||
c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
|
||||
|
||||
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
|
||||
if err != nil {
|
||||
c.log.Warn("can't update persistent state",
|
||||
c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
|
||||
zap.String("chain", "side"),
|
||||
zap.Uint32("block_index", block.Index))
|
||||
}
|
||||
|
||||
tickBlockTimers(c)
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -284,7 +283,6 @@ func lookupScriptHashesInNNS(c *cfg) {
|
|||
{&c.cfgNetmap.scriptHash, client.NNSNetmapContractName},
|
||||
{&c.cfgAccounting.scriptHash, client.NNSBalanceContractName},
|
||||
{&c.cfgContainer.scriptHash, client.NNSContainerContractName},
|
||||
{&c.cfgReputation.scriptHash, client.NNSReputationContractName},
|
||||
{&c.cfgMorph.proxyScriptHash, client.NNSProxyContractName},
|
||||
}
|
||||
)
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
|
||||
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
|
||||
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
|
@ -18,7 +18,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
|
||||
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
|
||||
"go.uber.org/atomic"
|
||||
"go.uber.org/zap"
|
||||
|
@ -142,8 +141,6 @@ func initNetmapService(ctx context.Context, c *cfg) {
|
|||
parseAttributes(c)
|
||||
c.cfgNodeInfo.localInfo.SetOffline()
|
||||
|
||||
readSubnetCfg(c)
|
||||
|
||||
if c.cfgMorph.client == nil {
|
||||
initMorphComponents(ctx, c)
|
||||
}
|
||||
|
@ -193,7 +190,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
|
|||
if (n-c.cfgNetmap.startEpoch)%reBootstrapInterval == 0 {
|
||||
err := c.bootstrap()
|
||||
if err != nil {
|
||||
c.log.Warn("can't send re-bootstrap tx", zap.Error(err))
|
||||
c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
@ -203,7 +200,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
|
|||
|
||||
ni, err := c.netmapLocalNodeState(e)
|
||||
if err != nil {
|
||||
c.log.Error("could not update node state on new epoch",
|
||||
c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
|
||||
zap.Uint64("epoch", e),
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
|
@ -218,7 +215,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
|
|||
addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
|
||||
_, err := makeNotaryDeposit(c)
|
||||
if err != nil {
|
||||
c.log.Error("could not make notary deposit",
|
||||
c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
@ -226,29 +223,6 @@ func addNewEpochNotificationHandlers(c *cfg) {
|
|||
}
|
||||
}
|
||||
|
||||
func readSubnetCfg(c *cfg) {
|
||||
var subnetCfg nodeconfig.SubnetConfig
|
||||
|
||||
subnetCfg.Init(*c.appCfg)
|
||||
|
||||
var (
|
||||
id subnetid.ID
|
||||
err error
|
||||
)
|
||||
|
||||
subnetCfg.IterateSubnets(func(idTxt string) {
|
||||
err = id.DecodeString(idTxt)
|
||||
fatalOnErrDetails("parse subnet entry", err)
|
||||
|
||||
c.cfgNodeInfo.localInfo.EnterSubnet(id)
|
||||
})
|
||||
|
||||
if subnetCfg.ExitZero() {
|
||||
subnetid.MakeZero(&id)
|
||||
c.cfgNodeInfo.localInfo.ExitSubnet(id)
|
||||
}
|
||||
}
|
||||
|
||||
// bootstrapNode adds current node to the Network map.
|
||||
// Must be called after initNetmapService.
|
||||
func bootstrapNode(c *cfg) {
|
||||
|
@ -298,7 +272,7 @@ func initNetmapState(c *cfg) {
|
|||
}
|
||||
}
|
||||
|
||||
c.log.Info("initial network state",
|
||||
c.log.Info(logs.FrostFSNodeInitialNetworkState,
|
||||
zap.Uint64("epoch", epoch),
|
||||
zap.String("state", stateWord),
|
||||
)
|
||||
|
@ -450,8 +424,6 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
|
|||
ni.SetEpochDuration(netInfoMorph.EpochDuration)
|
||||
ni.SetContainerFee(netInfoMorph.ContainerFee)
|
||||
ni.SetNamedContainerFee(netInfoMorph.ContainerAliasFee)
|
||||
ni.SetNumberOfEigenTrustIterations(netInfoMorph.EigenTrustIterations)
|
||||
ni.SetEigenTrustAlpha(netInfoMorph.EigenTrustAlpha)
|
||||
ni.SetIRCandidateFee(netInfoMorph.IRCandidateFee)
|
||||
ni.SetWithdrawalFee(netInfoMorph.WithdrawalFee)
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
|
||||
|
@ -28,7 +29,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
|
|||
|
||||
listRes, err := n.e.ListContainers(engine.ListContainersPrm{})
|
||||
if err != nil {
|
||||
log.Error("notificator: could not list containers", zap.Error(err))
|
||||
log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -41,9 +42,9 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
|
|||
for _, c := range listRes.Containers() {
|
||||
selectPrm.WithContainerID(c)
|
||||
|
||||
selectRes, err := n.e.Select(selectPrm)
|
||||
selectRes, err := n.e.Select(ctx, selectPrm)
|
||||
if err != nil {
|
||||
log.Error("notificator: could not select objects from container",
|
||||
log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
|
||||
zap.Stringer("cid", c),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
@ -53,7 +54,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
|
|||
for _, a := range selectRes.AddressList() {
|
||||
err = n.processAddress(ctx, a, handler)
|
||||
if err != nil {
|
||||
log.Error("notificator: could not process object",
|
||||
log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject,
|
||||
zap.Stringer("address", a),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
@ -62,7 +63,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
|
|||
}
|
||||
}
|
||||
|
||||
log.Debug("notificator: finished processing object notifications")
|
||||
log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications)
|
||||
}
|
||||
|
||||
func (n *notificationSource) processAddress(
|
||||
|
@ -101,7 +102,7 @@ type notificationWriter struct {
|
|||
|
||||
func (n notificationWriter) Notify(topic string, address oid.Address) {
|
||||
if err := n.w.Notify(topic, address); err != nil {
|
||||
n.l.Warn("could not write object notification",
|
||||
n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification,
|
||||
zap.Stringer("address", address),
|
||||
zap.String("topic", topic),
|
||||
zap.Error(err),
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -11,7 +10,7 @@ import (
|
|||
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
|
||||
policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
|
||||
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
|
||||
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
||||
|
@ -19,6 +18,7 @@ import (
|
|||
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
|
||||
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
|
||||
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
|
||||
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl"
|
||||
|
@ -36,15 +36,10 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
|
||||
truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -62,7 +57,7 @@ type objectSvc struct {
|
|||
func (c *cfg) MaxObjectSize() uint64 {
|
||||
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
|
||||
if err != nil {
|
||||
c.log.Error("could not get max object size value",
|
||||
c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
@ -152,39 +147,12 @@ func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
type coreClientConstructor reputationClientConstructor
|
||||
|
||||
func (x *coreClientConstructor) Get(info coreclient.NodeInfo) (coreclient.MultiAddressClient, error) {
|
||||
c, err := (*reputationClientConstructor)(x).Get(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.(coreclient.MultiAddressClient), nil
|
||||
}
|
||||
|
||||
func initObjectService(c *cfg) {
|
||||
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
|
||||
|
||||
clientConstructor := &reputationClientConstructor{
|
||||
log: c.log,
|
||||
nmSrc: c.netMapSource,
|
||||
netState: c.cfgNetmap.state,
|
||||
trustStorage: c.cfgReputation.localTrustStorage,
|
||||
basicConstructor: c.bgClientCache,
|
||||
}
|
||||
c.replicator = createReplicator(c, keyStorage, c.bgClientCache)
|
||||
|
||||
coreConstructor := &coreClientConstructor{
|
||||
log: c.log,
|
||||
nmSrc: c.netMapSource,
|
||||
netState: c.cfgNetmap.state,
|
||||
trustStorage: c.cfgReputation.localTrustStorage,
|
||||
basicConstructor: c.clientCache,
|
||||
}
|
||||
|
||||
c.replicator = createReplicator(c, keyStorage, clientConstructor)
|
||||
|
||||
addPolicer(c, keyStorage, clientConstructor)
|
||||
addPolicer(c, keyStorage, c.bgClientCache)
|
||||
|
||||
traverseGen := util.NewTraverserGenerator(c.netMapSource, c.cfgObject.cnrSource, c)
|
||||
|
||||
|
@ -192,11 +160,11 @@ func initObjectService(c *cfg) {
|
|||
|
||||
sPutV2 := createPutSvcV2(sPut, keyStorage)
|
||||
|
||||
sSearch := createSearchSvc(c, keyStorage, traverseGen, coreConstructor)
|
||||
sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache)
|
||||
|
||||
sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
|
||||
|
||||
sGet := createGetService(c, keyStorage, traverseGen, coreConstructor)
|
||||
sGet := createGetService(c, keyStorage, traverseGen, c.clientCache)
|
||||
|
||||
*c.cfgObject.getSvc = *sGet // need smth better
|
||||
|
||||
|
@ -235,7 +203,7 @@ func initObjectService(c *cfg) {
|
|||
}
|
||||
}
|
||||
|
||||
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputationClientConstructor) {
|
||||
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
|
||||
ls := c.cfgObject.cfgLocalStorage.localStorage
|
||||
|
||||
pol := policer.New(
|
||||
|
@ -259,7 +227,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati
|
|||
|
||||
_, err := ls.Inhume(ctx, inhumePrm)
|
||||
if err != nil {
|
||||
c.log.Warn("could not inhume mark redundant copy as garbage",
|
||||
c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
@ -287,7 +255,7 @@ func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
|
|||
}
|
||||
}
|
||||
|
||||
func createReplicator(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputationClientConstructor) *replicator.Replicator {
|
||||
func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCache) *replicator.Replicator {
|
||||
ls := c.cfgObject.cfgLocalStorage.localStorage
|
||||
|
||||
return replicator.New(
|
||||
|
@ -297,8 +265,9 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, clientConstructor *re
|
|||
),
|
||||
replicator.WithLocalStorage(ls),
|
||||
replicator.WithRemoteSender(
|
||||
putsvc.NewRemoteSender(keyStorage, (*coreClientConstructor)(clientConstructor)),
|
||||
putsvc.NewRemoteSender(keyStorage, cache),
|
||||
),
|
||||
replicator.WithMetrics(c.metricsCollector),
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -318,17 +287,9 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage) *putsvc.Service {
|
|||
}
|
||||
}
|
||||
|
||||
putConstructor := &coreClientConstructor{
|
||||
log: c.log,
|
||||
nmSrc: c.netMapSource,
|
||||
netState: c.cfgNetmap.state,
|
||||
trustStorage: c.cfgReputation.localTrustStorage,
|
||||
basicConstructor: c.putClientCache,
|
||||
}
|
||||
|
||||
return putsvc.NewService(
|
||||
putsvc.WithKeyStorage(keyStorage),
|
||||
putsvc.WithClientConstructor(putConstructor),
|
||||
putsvc.WithClientConstructor(c.putClientCache),
|
||||
putsvc.WithMaxSizeSource(newCachedMaxObjectSizeSource(c)),
|
||||
putsvc.WithObjectStorage(os),
|
||||
putsvc.WithContainerSource(c.cfgObject.cnrSource),
|
||||
|
@ -347,7 +308,7 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2
|
|||
)
|
||||
}
|
||||
|
||||
func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *coreClientConstructor) *searchsvc.Service {
|
||||
func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
|
||||
ls := c.cfgObject.cfgLocalStorage.localStorage
|
||||
|
||||
return searchsvc.New(
|
||||
|
@ -372,21 +333,18 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage)
|
|||
}
|
||||
|
||||
func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator,
|
||||
coreConstructor *coreClientConstructor) *getsvc.Service {
|
||||
coreConstructor *cache.ClientCache) *getsvc.Service {
|
||||
ls := c.cfgObject.cfgLocalStorage.localStorage
|
||||
|
||||
return getsvc.New(
|
||||
getsvc.WithLogger(c.log),
|
||||
getsvc.WithLocalStorageEngine(ls),
|
||||
getsvc.WithClientConstructor(coreConstructor),
|
||||
getsvc.WithTraverserGenerator(
|
||||
traverseGen.WithTraverseOptions(
|
||||
placement.SuccessAfter(1),
|
||||
),
|
||||
keyStorage,
|
||||
c.netMapSource,
|
||||
ls,
|
||||
traverseGen.WithTraverseOptions(
|
||||
placement.SuccessAfter(1),
|
||||
),
|
||||
getsvc.WithNetMapSource(c.netMapSource),
|
||||
getsvc.WithKeyStorage(keyStorage),
|
||||
)
|
||||
coreConstructor,
|
||||
getsvc.WithLogger(c.log))
|
||||
}
|
||||
|
||||
func createGetServiceV2(sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service {
|
||||
|
@ -479,135 +437,6 @@ func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) {
|
|||
return eaclInfo, nil
|
||||
}
|
||||
|
||||
type reputationClientConstructor struct {
|
||||
log *logger.Logger
|
||||
|
||||
nmSrc netmap.Source
|
||||
|
||||
netState netmap.State
|
||||
|
||||
trustStorage *truststorage.Storage
|
||||
|
||||
basicConstructor interface {
|
||||
Get(coreclient.NodeInfo) (coreclient.Client, error)
|
||||
}
|
||||
}
|
||||
|
||||
type reputationClient struct {
|
||||
coreclient.MultiAddressClient
|
||||
|
||||
prm truststorage.UpdatePrm
|
||||
|
||||
cons *reputationClientConstructor
|
||||
}
|
||||
|
||||
func (c *reputationClient) submitResult(err error) {
|
||||
currEpoch := c.cons.netState.CurrentEpoch()
|
||||
sat := err == nil
|
||||
|
||||
c.cons.log.Debug(
|
||||
"writing local reputation values",
|
||||
zap.Uint64("epoch", currEpoch),
|
||||
zap.Bool("satisfactory", sat),
|
||||
)
|
||||
|
||||
prm := c.prm
|
||||
prm.SetSatisfactory(sat)
|
||||
prm.SetEpoch(currEpoch)
|
||||
|
||||
c.cons.trustStorage.Update(prm)
|
||||
}
|
||||
|
||||
func (c *reputationClient) ObjectPutInit(ctx context.Context, prm client.PrmObjectPutInit) (*client.ObjectWriter, error) {
|
||||
res, err := c.MultiAddressClient.ObjectPutInit(ctx, prm)
|
||||
|
||||
// FIXME: (neofs-node#1193) here we submit only initialization errors, writing errors are not processed
|
||||
c.submitResult(err)
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (c *reputationClient) ObjectDelete(ctx context.Context, prm client.PrmObjectDelete) (*client.ResObjectDelete, error) {
|
||||
res, err := c.MultiAddressClient.ObjectDelete(ctx, prm)
|
||||
if err != nil {
|
||||
c.submitResult(err)
|
||||
} else {
|
||||
c.submitResult(apistatus.ErrFromStatus(res.Status()))
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (c *reputationClient) GetObjectInit(ctx context.Context, prm client.PrmObjectGet) (*client.ObjectReader, error) {
|
||||
res, err := c.MultiAddressClient.ObjectGetInit(ctx, prm)
|
||||
|
||||
// FIXME: (neofs-node#1193) here we submit only initialization errors, reading errors are not processed
|
||||
c.submitResult(err)
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (c *reputationClient) ObjectHead(ctx context.Context, prm client.PrmObjectHead) (*client.ResObjectHead, error) {
|
||||
res, err := c.MultiAddressClient.ObjectHead(ctx, prm)
|
||||
|
||||
c.submitResult(err)
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (c *reputationClient) ObjectHash(ctx context.Context, prm client.PrmObjectHash) (*client.ResObjectHash, error) {
|
||||
res, err := c.MultiAddressClient.ObjectHash(ctx, prm)
|
||||
|
||||
c.submitResult(err)
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (c *reputationClient) ObjectSearchInit(ctx context.Context, prm client.PrmObjectSearch) (*client.ObjectListReader, error) {
|
||||
res, err := c.MultiAddressClient.ObjectSearchInit(ctx, prm)
|
||||
|
||||
// FIXME: (neofs-node#1193) here we submit only initialization errors, reading errors are not processed
|
||||
c.submitResult(err)
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.Client, error) {
|
||||
cl, err := c.basicConstructor.Get(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nm, err := netmap.GetLatestNetworkMap(c.nmSrc)
|
||||
if err == nil {
|
||||
key := info.PublicKey()
|
||||
|
||||
nmNodes := nm.Nodes()
|
||||
var peer apireputation.PeerID
|
||||
|
||||
for i := range nmNodes {
|
||||
if bytes.Equal(nmNodes[i].PublicKey(), key) {
|
||||
peer.SetPublicKey(nmNodes[i].PublicKey())
|
||||
|
||||
prm := truststorage.UpdatePrm{}
|
||||
prm.SetPeer(peer)
|
||||
|
||||
return &reputationClient{
|
||||
MultiAddressClient: cl.(coreclient.MultiAddressClient),
|
||||
prm: prm,
|
||||
cons: c,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
c.log.Warn("could not get latest network map to overload the client",
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
||||
return cl, nil
|
||||
}
|
||||
|
||||
type engineWithNotifications struct {
|
||||
base putsvc.ObjectStorage
|
||||
nw notificationWriter
|
||||
|
@ -616,20 +445,20 @@ type engineWithNotifications struct {
|
|||
defaultTopic string
|
||||
}
|
||||
|
||||
func (e engineWithNotifications) IsLocked(address oid.Address) (bool, error) {
|
||||
return e.base.IsLocked(address)
|
||||
func (e engineWithNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
|
||||
return e.base.IsLocked(ctx, address)
|
||||
}
|
||||
|
||||
func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
|
||||
return e.base.Delete(ctx, tombstone, toDelete)
|
||||
}
|
||||
|
||||
func (e engineWithNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
|
||||
return e.base.Lock(locker, toLock)
|
||||
func (e engineWithNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
|
||||
return e.base.Lock(ctx, locker, toLock)
|
||||
}
|
||||
|
||||
func (e engineWithNotifications) Put(o *objectSDK.Object) error {
|
||||
if err := e.base.Put(o); err != nil {
|
||||
func (e engineWithNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
|
||||
if err := e.base.Put(ctx, o); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -653,8 +482,8 @@ type engineWithoutNotifications struct {
|
|||
engine *engine.StorageEngine
|
||||
}
|
||||
|
||||
func (e engineWithoutNotifications) IsLocked(address oid.Address) (bool, error) {
|
||||
return e.engine.IsLocked(address)
|
||||
func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
|
||||
return e.engine.IsLocked(ctx, address)
|
||||
}
|
||||
|
||||
func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
|
||||
|
@ -672,10 +501,10 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
|
|||
return err
|
||||
}
|
||||
|
||||
func (e engineWithoutNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
|
||||
return e.engine.Lock(locker.Container(), locker.Object(), toLock)
|
||||
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
|
||||
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
|
||||
}
|
||||
|
||||
func (e engineWithoutNotifications) Put(o *objectSDK.Object) error {
|
||||
return engine.Put(e.engine, o)
|
||||
func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
|
||||
return engine.Put(ctx, e.engine, o)
|
||||
}
|
||||
|
|
|
@@ -1,10 +1,19 @@
package main

import (
	"runtime"

	profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
	httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)

func initProfilerService(c *cfg) {
	tuneProfilers(c)

	pprof, _ := pprofComponent(c)
	pprof.init(c)
}

func pprofComponent(c *cfg) (*httpComponent, bool) {
	var updated bool
	// check if it has been inited before

@@ -13,6 +22,7 @@ func pprofComponent(c *cfg) (*httpComponent, bool) {
		c.dynamicConfiguration.pprof.cfg = c
		c.dynamicConfiguration.pprof.name = "pprof"
		c.dynamicConfiguration.pprof.handler = httputil.Handler()
		c.dynamicConfiguration.pprof.preReload = tuneProfilers
		updated = true
	}

@@ -35,3 +45,18 @@ func pprofComponent(c *cfg) (*httpComponent, bool) {

	return c.dynamicConfiguration.pprof, updated
}

func tuneProfilers(c *cfg) {
	// Disabled by default, see documentation for
	// runtime.SetBlockProfileRate() and runtime.SetMutexProfileFraction().
	blockRate := 0
	mutexRate := 0

	if profilerconfig.Enabled(c.appCfg) {
		blockRate = profilerconfig.BlockRate(c.appCfg)
		mutexRate = profilerconfig.MutexRate(c.appCfg)
	}

	runtime.SetBlockProfileRate(blockRate)
	runtime.SetMutexProfileFraction(mutexRate)
}
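tuneProfilers maps the two new parameters onto the runtime knobs: SetBlockProfileRate samples roughly one goroutine blocking event per the given number of nanoseconds spent blocked, and SetMutexProfileFraction reports about 1 out of every N mutex contention events. A self-contained sketch of the same mechanism using only the standard library (the address and rates are placeholders; the node itself serves these profiles through its pprof httpComponent):

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* handlers on DefaultServeMux
	"runtime"
)

func main() {
	// Same knobs as tuneProfilers: non-zero values enable the profiles.
	runtime.SetBlockProfileRate(10_000)     // sample blocking events (~1 per 10µs spent blocked)
	runtime.SetMutexProfileFraction(10_000) // report 1 of every 10_000 contention events

	// Profiles become available at /debug/pprof/block and /debug/pprof/mutex.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}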
@ -1,385 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v2reputation "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
|
||||
v2reputationgrpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
|
||||
intermediatereputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/intermediate"
|
||||
localreputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/local"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
|
||||
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
|
||||
grpcreputation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/reputation/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
|
||||
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
|
||||
reputationrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common/router"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
|
||||
eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
|
||||
eigentrustctrl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/controller"
|
||||
intermediateroutes "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/routes"
|
||||
consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
|
||||
localtrustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
|
||||
localroutes "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/routes"
|
||||
truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
|
||||
reputationrpc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/rpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func initReputationService(ctx context.Context, c *cfg) {
|
||||
wrap, err := repClient.NewFromMorph(c.cfgMorph.client, c.cfgReputation.scriptHash, 0, repClient.TryNotary())
|
||||
fatalOnErr(err)
|
||||
|
||||
localKey := c.key.PublicKey().Bytes()
|
||||
|
||||
nmSrc := c.netMapSource
|
||||
|
||||
// storing calculated trusts as a daughter
|
||||
c.cfgReputation.localTrustStorage = truststorage.New(
|
||||
truststorage.Prm{},
|
||||
)
|
||||
|
||||
daughterStorage := daughters.New(daughters.Prm{})
|
||||
consumerStorage := consumerstorage.New(consumerstorage.Prm{})
|
||||
|
||||
localTrustLogger := &logger.Logger{Logger: c.log.With(zap.String("trust_type", "local"))}
|
||||
|
||||
managerBuilder := reputationcommon.NewManagerBuilder(
|
||||
reputationcommon.ManagersPrm{
|
||||
NetMapSource: nmSrc,
|
||||
},
|
||||
reputationcommon.WithLogger(c.log),
|
||||
)
|
||||
|
||||
localRouteBuilder := localroutes.New(
|
||||
localroutes.Prm{
|
||||
ManagerBuilder: managerBuilder,
|
||||
Log: localTrustLogger,
|
||||
},
|
||||
)
|
||||
|
||||
localTrustRouter := createLocalTrustRouter(c, localRouteBuilder, localTrustLogger, daughterStorage)
|
||||
|
||||
intermediateTrustRouter := createIntermediateTrustRouter(c, consumerStorage, managerBuilder)
|
||||
|
||||
eigenTrustController := createEigenTrustController(c, intermediateTrustRouter, localKey, wrap, daughterStorage, consumerStorage)
|
||||
|
||||
c.cfgReputation.localTrustCtrl = createLocalTrustController(c, localTrustLogger, localKey, localTrustRouter)
|
||||
|
||||
addReputationReportHandler(ctx, c)
|
||||
|
||||
server := grpcreputation.New(
|
||||
reputationrpc.NewSignService(
|
||||
&c.key.PrivateKey,
|
||||
reputationrpc.NewResponseService(
|
||||
&reputationServer{
|
||||
cfg: c,
|
||||
log: c.log,
|
||||
localRouter: localTrustRouter,
|
||||
intermediateRouter: intermediateTrustRouter,
|
||||
routeBuilder: localRouteBuilder,
|
||||
},
|
||||
c.respSvc,
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
for _, srv := range c.cfgGRPC.servers {
|
||||
v2reputationgrpc.RegisterReputationServiceServer(srv, server)
|
||||
}
|
||||
|
||||
// initialize eigen trust block timer
|
||||
newEigenTrustIterTimer(c)
|
||||
|
||||
addEigenTrustEpochHandler(ctx, c, eigenTrustController)
|
||||
}
|
||||
|
||||
func addReputationReportHandler(ctx context.Context, c *cfg) {
|
||||
addNewEpochAsyncNotificationHandler(
|
||||
c,
|
||||
func(ev event.Event) {
|
||||
c.log.Debug("start reporting reputation on new epoch event")
|
||||
|
||||
var reportPrm localtrustcontroller.ReportPrm
|
||||
|
||||
// report collected values from previous epoch
|
||||
reportPrm.SetEpoch(ev.(netmap.NewEpoch).EpochNumber() - 1)
|
||||
|
||||
c.cfgReputation.localTrustCtrl.Report(ctx, reportPrm)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController *eigentrustctrl.Controller) {
|
||||
addNewEpochAsyncNotificationHandler(
|
||||
c,
|
||||
func(e event.Event) {
|
||||
epoch := e.(netmap.NewEpoch).EpochNumber()
|
||||
|
||||
log := c.log.With(zap.Uint64("epoch", epoch))
|
||||
|
||||
duration, err := c.cfgNetmap.wrapper.EpochDuration()
|
||||
if err != nil {
|
||||
log.Debug("could not fetch epoch duration", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations()
|
||||
if err != nil {
|
||||
log.Debug("could not fetch iteration number", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
epochTimer, err := ticker.NewIterationsTicker(duration, iterations, func() {
|
||||
eigenTrustController.Continue(ctx,
|
||||
eigentrustctrl.ContinuePrm{
|
||||
Epoch: epoch - 1,
|
||||
},
|
||||
)
|
||||
})
|
||||
if err != nil {
|
||||
log.Debug("could not create fixed epoch timer", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
c.cfgMorph.eigenTrustTicker.addEpochTimer(epoch, epochTimer)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func createLocalTrustRouter(c *cfg, localRouteBuilder *localroutes.Builder, localTrustLogger *logger.Logger, daughterStorage *daughters.Storage) *reputationrouter.Router {
|
||||
// storing received daughter(of current node) trusts as a manager
|
||||
daughterStorageWriterProvider := &intermediatereputation.DaughterStorageWriterProvider{
|
||||
Log: c.log,
|
||||
Storage: daughterStorage,
|
||||
}
|
||||
|
||||
remoteLocalTrustProvider := common.NewRemoteTrustProvider(
|
||||
common.RemoteProviderPrm{
|
||||
NetmapKeys: c,
|
||||
DeadEndProvider: daughterStorageWriterProvider,
|
||||
ClientCache: c.bgClientCache,
|
||||
WriterProvider: localreputation.NewRemoteProvider(
|
||||
localreputation.RemoteProviderPrm{
|
||||
Key: &c.key.PrivateKey,
|
||||
Log: localTrustLogger,
|
||||
},
|
||||
),
|
||||
Log: localTrustLogger,
|
||||
},
|
||||
)
|
||||
|
||||
localTrustRouter := reputationrouter.New(
|
||||
reputationrouter.Prm{
|
||||
LocalServerInfo: c,
|
||||
RemoteWriterProvider: remoteLocalTrustProvider,
|
||||
Builder: localRouteBuilder,
|
||||
},
|
||||
reputationrouter.WithLogger(localTrustLogger))
|
||||
return localTrustRouter
|
||||
}
|
||||
|
||||
func createIntermediateTrustRouter(c *cfg, consumerStorage *consumerstorage.Storage, managerBuilder reputationcommon.ManagerBuilder) *reputationrouter.Router {
|
||||
intermediateTrustLogger := &logger.Logger{Logger: c.log.With(zap.String("trust_type", "intermediate"))}
|
||||
|
||||
consumerStorageWriterProvider := &intermediatereputation.ConsumerStorageWriterProvider{
|
||||
Log: c.log,
|
||||
Storage: consumerStorage,
|
||||
}
|
||||
|
||||
remoteIntermediateTrustProvider := common.NewRemoteTrustProvider(
|
||||
common.RemoteProviderPrm{
|
||||
NetmapKeys: c,
|
||||
DeadEndProvider: consumerStorageWriterProvider,
|
||||
ClientCache: c.bgClientCache,
|
||||
WriterProvider: intermediatereputation.NewRemoteProvider(
|
||||
intermediatereputation.RemoteProviderPrm{
|
||||
Key: &c.key.PrivateKey,
|
||||
Log: intermediateTrustLogger,
|
||||
},
|
||||
),
|
||||
Log: intermediateTrustLogger,
|
||||
},
|
||||
)
|
||||
|
||||
intermediateRouteBuilder := intermediateroutes.New(
|
||||
intermediateroutes.Prm{
|
||||
ManagerBuilder: managerBuilder,
|
||||
Log: intermediateTrustLogger,
|
||||
},
|
||||
)
|
||||
|
||||
intermediateTrustRouter := reputationrouter.New(
|
||||
reputationrouter.Prm{
|
||||
LocalServerInfo: c,
|
||||
RemoteWriterProvider: remoteIntermediateTrustProvider,
|
||||
Builder: intermediateRouteBuilder,
|
||||
},
|
||||
reputationrouter.WithLogger(intermediateTrustLogger),
|
||||
)
|
||||
return intermediateTrustRouter
|
||||
}
|
||||
|
||||
func createEigenTrustController(c *cfg, intermediateTrustRouter *reputationrouter.Router, localKey []byte, wrap *repClient.Client,
|
||||
daughterStorage *daughters.Storage, consumerStorage *consumerstorage.Storage) *eigentrustctrl.Controller {
|
||||
eigenTrustCalculator := eigentrustcalc.New(
|
||||
eigentrustcalc.Prm{
|
||||
AlphaProvider: c.cfgNetmap.wrapper,
|
||||
InitialTrustSource: intermediatereputation.InitialTrustSource{
|
||||
NetMap: c.netMapSource,
|
||||
},
|
||||
IntermediateValueTarget: intermediateTrustRouter,
|
||||
WorkerPool: c.cfgReputation.workerPool,
|
||||
FinalResultTarget: intermediatereputation.NewFinalWriterProvider(
|
||||
intermediatereputation.FinalWriterProviderPrm{
|
||||
PrivatKey: &c.key.PrivateKey,
|
||||
PubKey: localKey,
|
||||
Client: wrap,
|
||||
},
|
||||
intermediatereputation.FinalWriterWithLogger(c.log),
|
||||
),
|
||||
DaughterTrustSource: &intermediatereputation.DaughterTrustIteratorProvider{
|
||||
DaughterStorage: daughterStorage,
|
||||
ConsumerStorage: consumerStorage,
|
||||
},
|
||||
},
|
||||
eigentrustcalc.WithLogger(c.log),
|
||||
)
|
||||
|
||||
eigenTrustController := eigentrustctrl.New(
|
||||
eigentrustctrl.Prm{
|
||||
DaughtersTrustCalculator: &intermediatereputation.DaughtersTrustCalculator{
|
||||
Calculator: eigenTrustCalculator,
|
||||
},
|
||||
IterationsProvider: c.cfgNetmap.wrapper,
|
||||
WorkerPool: c.cfgReputation.workerPool,
|
||||
},
|
||||
eigentrustctrl.WithLogger(c.log),
|
||||
)
|
||||
return eigenTrustController
|
||||
}
|
||||
|
||||
func createLocalTrustController(c *cfg, localTrustLogger *logger.Logger, localKey []byte, localTrustRouter *reputationrouter.Router) *localtrustcontroller.Controller {
|
||||
localTrustStorage := &localreputation.TrustStorage{
|
||||
Log: localTrustLogger,
|
||||
Storage: c.cfgReputation.localTrustStorage,
|
||||
NmSrc: c.netMapSource,
|
||||
LocalKey: localKey,
|
||||
}
|
||||
|
||||
return localtrustcontroller.New(
|
||||
localtrustcontroller.Prm{
|
||||
LocalTrustSource: localTrustStorage,
|
||||
LocalTrustTarget: localTrustRouter,
|
||||
},
|
||||
localtrustcontroller.WithLogger(c.log),
|
||||
)
|
||||
}
|
||||
|
||||
type reputationServer struct {
|
||||
*cfg
|
||||
log *logger.Logger
|
||||
localRouter *reputationrouter.Router
|
||||
intermediateRouter *reputationrouter.Router
|
||||
routeBuilder reputationrouter.Builder
|
||||
}
|
||||
|
||||
func (s *reputationServer) AnnounceLocalTrust(ctx context.Context, req *v2reputation.AnnounceLocalTrustRequest) (*v2reputation.AnnounceLocalTrustResponse, error) {
|
||||
passedRoute := reverseRoute(req.GetVerificationHeader())
|
||||
passedRoute = append(passedRoute, s)
|
||||
|
||||
body := req.GetBody()
|
||||
|
||||
ep := &common.EpochProvider{
|
||||
E: body.GetEpoch(),
|
||||
}
|
||||
|
||||
w, err := s.localRouter.InitWriter(reputationrouter.NewRouteInfo(ep, passedRoute))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not initialize local trust writer: %w", err)
|
||||
}
|
||||
|
||||
for _, trust := range body.GetTrusts() {
|
||||
err = s.processLocalTrust(ctx, body.GetEpoch(), apiToLocalTrust(&trust, passedRoute[0].PublicKey()), passedRoute, w)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not write one of local trusts: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
resp := new(v2reputation.AnnounceLocalTrustResponse)
|
||||
resp.SetBody(new(v2reputation.AnnounceLocalTrustResponseBody))
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *reputationServer) AnnounceIntermediateResult(ctx context.Context, req *v2reputation.AnnounceIntermediateResultRequest) (*v2reputation.AnnounceIntermediateResultResponse, error) {
|
||||
passedRoute := reverseRoute(req.GetVerificationHeader())
|
||||
passedRoute = append(passedRoute, s)
|
||||
|
||||
body := req.GetBody()
|
||||
|
||||
ei := eigentrust.NewEpochIteration(body.GetEpoch(), body.GetIteration())
|
||||
|
||||
w, err := s.intermediateRouter.InitWriter(reputationrouter.NewRouteInfo(ei, passedRoute))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not initialize trust writer: %w", err)
|
||||
}
|
||||
|
||||
v2Trust := body.GetTrust()
|
||||
|
||||
trust := apiToLocalTrust(v2Trust.GetTrust(), v2Trust.GetTrustingPeer().GetPublicKey())
|
||||
|
||||
err = w.Write(ctx, trust)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not write trust: %w", err)
|
||||
}
|
||||
|
||||
resp := new(v2reputation.AnnounceIntermediateResultResponse)
|
||||
resp.SetBody(new(v2reputation.AnnounceIntermediateResultResponseBody))
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *reputationServer) processLocalTrust(ctx context.Context, epoch uint64, t reputation.Trust,
|
||||
passedRoute []reputationcommon.ServerInfo, w reputationcommon.Writer) error {
|
||||
err := reputationrouter.CheckRoute(s.routeBuilder, epoch, t, passedRoute)
|
||||
if err != nil {
|
||||
return fmt.Errorf("wrong route of reputation trust value: %w", err)
|
||||
}
|
||||
|
||||
return w.Write(ctx, t)
|
||||
}
|
||||
|
||||
// apiToLocalTrust converts v2 Trust to local reputation.Trust, adding trustingPeer.
|
||||
func apiToLocalTrust(t *v2reputation.Trust, trustingPeer []byte) reputation.Trust {
|
||||
var trusted, trusting apireputation.PeerID
|
||||
trusted.SetPublicKey(t.GetPeer().GetPublicKey())
|
||||
trusting.SetPublicKey(trustingPeer)
|
||||
|
||||
localTrust := reputation.Trust{}
|
||||
|
||||
localTrust.SetValue(reputation.TrustValueFromFloat64(t.GetValue()))
|
||||
localTrust.SetPeer(trusted)
|
||||
localTrust.SetTrustingPeer(trusting)
|
||||
|
||||
return localTrust
|
||||
}
|
||||
|
||||
func reverseRoute(hdr *session.RequestVerificationHeader) (passedRoute []reputationcommon.ServerInfo) {
|
||||
for hdr != nil {
|
||||
passedRoute = append(passedRoute, &common.OnlyKeyRemoteServerInfo{
|
||||
Key: hdr.GetBodySignature().GetKey(),
|
||||
})
|
||||
|
||||
hdr = hdr.GetOrigin()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@@ -1,100 +0,0 @@
package common

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
	trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

type clientCache interface {
	Get(client.NodeInfo) (client.Client, error)
}

// clientKeyRemoteProvider must provide a remote writer and take into account
// that requests must be sent via the passed api client and must be signed with
// the passed private key.
type clientKeyRemoteProvider interface {
	WithClient(client.Client) reputationcommon.WriterProvider
}

// RemoteTrustProvider is an implementation of reputation RemoteWriterProvider interface.
// It caches clients, checks if it is the end of the route and checks either the current
// node is a remote target or not.
//
// remoteTrustProvider requires to be provided with clientKeyRemoteProvider.
type RemoteTrustProvider struct {
	netmapKeys      netmap.AnnouncedKeys
	deadEndProvider reputationcommon.WriterProvider
	clientCache     clientCache
	remoteProvider  clientKeyRemoteProvider
	log             *logger.Logger
}

// RemoteProviderPrm groups the required parameters of the remoteTrustProvider's constructor.
//
// All values must comply with the requirements imposed on them.
// Passing incorrect parameter values will result in constructor
// failure (error or panic depending on the implementation).
type RemoteProviderPrm struct {
	NetmapKeys      netmap.AnnouncedKeys
	DeadEndProvider reputationcommon.WriterProvider
	ClientCache     clientCache
	WriterProvider  clientKeyRemoteProvider
	Log             *logger.Logger
}

func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider {
	switch {
	case prm.NetmapKeys == nil:
		PanicOnPrmValue("NetmapKeys", prm.NetmapKeys)
	case prm.DeadEndProvider == nil:
		PanicOnPrmValue("DeadEndProvider", prm.DeadEndProvider)
	case prm.ClientCache == nil:
		PanicOnPrmValue("ClientCache", prm.ClientCache)
	case prm.WriterProvider == nil:
		PanicOnPrmValue("WriterProvider", prm.WriterProvider)
	case prm.Log == nil:
		PanicOnPrmValue("Logger", prm.Log)
	}

	return &RemoteTrustProvider{
		netmapKeys:      prm.NetmapKeys,
		deadEndProvider: prm.DeadEndProvider,
		clientCache:     prm.ClientCache,
		remoteProvider:  prm.WriterProvider,
		log:             prm.Log,
	}
}

func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
	rtp.log.Debug("initializing remote writer provider")

	if srv == nil {
		rtp.log.Debug("route has reached dead-end provider")
		return rtp.deadEndProvider, nil
	}

	if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) {
		// if local => return no-op writer
		rtp.log.Debug("initializing no-op writer provider")
		return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil
	}

	var info client.NodeInfo

	err := client.NodeInfoFromRawNetmapElement(&info, srv)
	if err != nil {
		return nil, fmt.Errorf("parse client node info: %w", err)
	}

	c, err := rtp.clientCache.Get(info)
	if err != nil {
		return nil, fmt.Errorf("could not initialize API client: %w", err)
	}

	return rtp.remoteProvider.WithClient(c), nil
}
@@ -1,53 +0,0 @@
package common

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
)

type EpochProvider struct {
	E uint64
}

func (ep *EpochProvider) Epoch() uint64 {
	return ep.E
}

type NopReputationWriter struct{}

func (NopReputationWriter) Write(context.Context, reputation.Trust) error {
	return nil
}

func (NopReputationWriter) Close(context.Context) error {
	return nil
}

// OnlyKeyRemoteServerInfo is an implementation of reputation.ServerInfo
// interface but with only public key data.
type OnlyKeyRemoteServerInfo struct {
	Key []byte
}

func (i *OnlyKeyRemoteServerInfo) PublicKey() []byte {
	return i.Key
}

func (*OnlyKeyRemoteServerInfo) IterateAddresses(func(string) bool) {
}

func (*OnlyKeyRemoteServerInfo) NumberOfAddresses() int {
	return 0
}

func (*OnlyKeyRemoteServerInfo) ExternalAddresses() []string {
	return nil
}

const invalidPrmValFmt = "invalid parameter %s (%T):%v"

func PanicOnPrmValue(n string, v any) {
	panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
}
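`PanicOnPrmValue` is the fail-fast helper the reputation constructors use for required dependencies: instead of returning an error, they panic at start-up with the offending parameter's name, type and value. A minimal sketch of a constructor written in that style; the `Prm`/`Server` types and the local `panicOnPrmValue` copy are invented for the example:

```go
package main

import "fmt"

// panicOnPrmValue mirrors common.PanicOnPrmValue: report which required
// parameter is invalid and stop immediately.
func panicOnPrmValue(name string, v any) {
	panic(fmt.Sprintf("invalid parameter %s (%T):%v", name, v, v))
}

type Logger struct{}

type Prm struct {
	Log *Logger
}

type Server struct {
	log *Logger
}

// NewServer validates every required dependency up front and panics on the
// first missing one, so misconfiguration is caught at start-up, not later.
func NewServer(prm Prm) *Server {
	switch {
	case prm.Log == nil:
		panicOnPrmValue("Logger", prm.Log)
	}

	return &Server{log: prm.Log}
}

func main() {
	srv := NewServer(Prm{Log: &Logger{}})
	fmt.Printf("server constructed: %T\n", srv)
}
```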
@@ -1,57 +0,0 @@
package intermediate

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
	eigencalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
	eigentrustctrl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/controller"
	apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
)

// InitialTrustSource is an implementation of the
// reputation/eigentrust/calculator's InitialTrustSource interface.
type InitialTrustSource struct {
	NetMap netmap.Source
}

var ErrEmptyNetMap = errors.New("empty NepMap")

// InitialTrust returns `initialTrust` as an initial trust value.
func (i InitialTrustSource) InitialTrust(apireputation.PeerID) (reputation.TrustValue, error) {
	nm, err := i.NetMap.GetNetMap(1)
	if err != nil {
		return reputation.TrustZero, fmt.Errorf("failed to get NetMap: %w", err)
	}

	nodeCount := reputation.TrustValueFromFloat64(float64(len(nm.Nodes())))
	if nodeCount == 0 {
		return reputation.TrustZero, ErrEmptyNetMap
	}

	return reputation.TrustOne.Div(nodeCount), nil
}

// DaughtersTrustCalculator wraps EigenTrust calculator and implements the
// eigentrust/calculator's DaughtersTrustCalculator interface.
type DaughtersTrustCalculator struct {
	Calculator *eigencalc.Calculator
}

// Calculate converts and passes values to the wrapped calculator.
func (c *DaughtersTrustCalculator) Calculate(ctx context.Context, iterCtx eigentrustctrl.IterationContext) {
	calcPrm := eigencalc.CalculatePrm{}
	epochIteration := eigentrust.EpochIteration{}

	epochIteration.SetEpoch(iterCtx.Epoch())
	epochIteration.SetI(iterCtx.I())

	calcPrm.SetLast(iterCtx.Last())
	calcPrm.SetEpochIteration(epochIteration)

	c.Calculator.Calculate(ctx, calcPrm)
}
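The initial trust vector is uniform: every peer starts with 1/N, where N is the number of nodes in the network map, so a 4-node map yields 0.25 per peer. A self-contained sketch of that arithmetic with plain float64 values (the real code works with `reputation.TrustValue`, not floats):

```go
package main

import (
	"errors"
	"fmt"
)

// initialTrust mirrors InitialTrustSource.InitialTrust with plain floats:
// a uniform 1/N starting value for every peer in an N-node network map.
func initialTrust(nodeCount int) (float64, error) {
	if nodeCount == 0 {
		return 0, errors.New("empty netmap")
	}
	return 1 / float64(nodeCount), nil
}

func main() {
	for _, n := range []int{1, 4, 100} {
		v, err := initialTrust(n)
		if err != nil {
			panic(err)
		}
		fmt.Printf("N=%d -> initial trust %.4f\n", n, v) // N=4 -> 0.2500
	}
}
```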
@ -1,65 +0,0 @@
|
|||
package intermediate
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
|
||||
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
|
||||
eigencalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
|
||||
consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var ErrIncorrectContextPanicMsg = "could not write intermediate trust: passed context incorrect"
|
||||
|
||||
// ConsumerStorageWriterProvider is an implementation of the reputation.WriterProvider
|
||||
// interface that provides ConsumerTrustWriter writer.
|
||||
type ConsumerStorageWriterProvider struct {
|
||||
Log *logger.Logger
|
||||
Storage *consumerstorage.Storage
|
||||
}
|
||||
|
||||
// ConsumerTrustWriter is an implementation of the reputation.Writer interface
|
||||
// that writes passed consumer's Trust values to the Consumer storage. After writing
|
||||
// that, values can be used in eigenTrust algorithm's iterations.
|
||||
type ConsumerTrustWriter struct {
|
||||
log *logger.Logger
|
||||
storage *consumerstorage.Storage
|
||||
iterInfo eigencalc.EpochIterationInfo
|
||||
}
|
||||
|
||||
func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error {
|
||||
w.log.Debug("writing received consumer's trusts",
|
||||
zap.Uint64("epoch", w.iterInfo.Epoch()),
|
||||
zap.Uint32("iteration", w.iterInfo.I()),
|
||||
zap.Stringer("trusting_peer", t.TrustingPeer()),
|
||||
zap.Stringer("trusted_peer", t.Peer()),
|
||||
)
|
||||
|
||||
trust := eigentrust.IterationTrust{Trust: t}
|
||||
|
||||
trust.SetEpoch(w.iterInfo.Epoch())
|
||||
trust.SetI(w.iterInfo.I())
|
||||
|
||||
w.storage.Put(trust)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *ConsumerTrustWriter) Close(context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ConsumerStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
|
||||
iterInfo, ok := ep.(eigencalc.EpochIterationInfo)
|
||||
if !ok {
|
||||
panic(ErrIncorrectContextPanicMsg)
|
||||
}
|
||||
|
||||
return &ConsumerTrustWriter{
|
||||
log: s.Log,
|
||||
storage: s.Storage,
|
||||
iterInfo: iterInfo,
|
||||
}, nil
|
||||
}
|
|
@ -1,146 +0,0 @@
|
|||
package intermediate
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
|
||||
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
|
||||
eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
|
||||
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// FinalWriterProviderPrm groups the required parameters of the FinalWriterProvider's constructor.
|
||||
//
|
||||
// All values must comply with the requirements imposed on them.
|
||||
// Passing incorrect parameter values will result in constructor
|
||||
// failure (error or panic depending on the implementation).
|
||||
type FinalWriterProviderPrm struct {
|
||||
PrivatKey *ecdsa.PrivateKey
|
||||
PubKey []byte
|
||||
Client *repClient.Client
|
||||
}
|
||||
|
||||
// NewFinalWriterProvider creates a new instance of the FinalWriterProvider.
|
||||
//
|
||||
// Panics if at least one value of the parameters is invalid.
|
||||
//
|
||||
// The created FinalWriterProvider does not require additional
|
||||
// initialization and is completely ready for work.
|
||||
func NewFinalWriterProvider(prm FinalWriterProviderPrm, opts ...FinalWriterOption) *FinalWriterProvider {
|
||||
o := defaultFinalWriterOptionsOpts()
|
||||
|
||||
for i := range opts {
|
||||
opts[i](o)
|
||||
}
|
||||
|
||||
return &FinalWriterProvider{
|
||||
prm: prm,
|
||||
opts: o,
|
||||
}
|
||||
}
|
||||
|
||||
// FinalWriterProvider is an implementation of the reputation.eigentrust.calculator
|
||||
// IntermediateWriterProvider interface. It inits FinalWriter.
|
||||
type FinalWriterProvider struct {
|
||||
prm FinalWriterProviderPrm
|
||||
opts *finalWriterOptions
|
||||
}
|
||||
|
||||
func (fwp FinalWriterProvider) InitIntermediateWriter(
|
||||
_ eigentrustcalc.EpochIterationInfo) (eigentrustcalc.IntermediateWriter, error) {
|
||||
return &FinalWriter{
|
||||
privatKey: fwp.prm.PrivatKey,
|
||||
pubKey: fwp.prm.PubKey,
|
||||
client: fwp.prm.Client,
|
||||
l: fwp.opts.log,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// FinalWriter is an implementation of the reputation.eigentrust.calculator IntermediateWriter
|
||||
// interface that writes GlobalTrust to contract directly.
|
||||
type FinalWriter struct {
|
||||
privatKey *ecdsa.PrivateKey
|
||||
pubKey []byte
|
||||
client *repClient.Client
|
||||
|
||||
l *logger.Logger
|
||||
}
|
||||
|
||||
func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error {
|
||||
fw.l.Debug("start writing global trusts to contract")
|
||||
|
||||
args := repClient.PutPrm{}
|
||||
|
||||
apiTrustedPeerID := t.Peer()
|
||||
|
||||
var apiTrust apireputation.Trust
|
||||
apiTrust.SetValue(t.Value().Float64())
|
||||
apiTrust.SetPeer(t.Peer())
|
||||
|
||||
var managerPublicKey [33]byte
|
||||
copy(managerPublicKey[:], fw.pubKey)
|
||||
|
||||
var apiMangerPeerID apireputation.PeerID
|
||||
apiMangerPeerID.SetPublicKey(managerPublicKey[:])
|
||||
|
||||
var gTrust apireputation.GlobalTrust
|
||||
gTrust.SetTrust(apiTrust)
|
||||
gTrust.SetManager(apiMangerPeerID)
|
||||
|
||||
err := gTrust.Sign(frostfsecdsa.Signer(*fw.privatKey))
|
||||
if err != nil {
|
||||
fw.l.Debug(
|
||||
"failed to sign global trust",
|
||||
zap.Error(err),
|
||||
)
|
||||
return fmt.Errorf("failed to sign global trust: %w", err)
|
||||
}
|
||||
|
||||
args.SetEpoch(t.Epoch())
|
||||
args.SetValue(gTrust)
|
||||
args.SetPeerID(apiTrustedPeerID)
|
||||
|
||||
err = fw.client.Put(
|
||||
args,
|
||||
)
|
||||
if err != nil {
|
||||
fw.l.Debug(
|
||||
"failed to write global trust to contract",
|
||||
zap.Error(err),
|
||||
)
|
||||
return fmt.Errorf("failed to write global trust to contract: %w", err)
|
||||
}
|
||||
|
||||
fw.l.Debug(
|
||||
"sent global trust to contract",
|
||||
zap.Uint64("epoch", t.Epoch()),
|
||||
zap.Float64("value", t.Value().Float64()),
|
||||
zap.Stringer("peer", t.Peer()),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type finalWriterOptions struct {
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
type FinalWriterOption func(*finalWriterOptions)
|
||||
|
||||
func defaultFinalWriterOptionsOpts() *finalWriterOptions {
|
||||
return &finalWriterOptions{
|
||||
log: &logger.Logger{Logger: zap.L()},
|
||||
}
|
||||
}
|
||||
|
||||
func FinalWriterWithLogger(l *logger.Logger) FinalWriterOption {
|
||||
return func(o *finalWriterOptions) {
|
||||
if l != nil {
|
||||
o.log = l
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,50 +0,0 @@
package intermediate

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

// DaughterStorageWriterProvider is an implementation of the reputation.WriterProvider
// interface that provides DaughterTrustWriter writer.
type DaughterStorageWriterProvider struct {
	Log     *logger.Logger
	Storage *daughters.Storage
}

// DaughterTrustWriter is an implementation of the reputation.Writer interface
// that writes passed daughter's Trust values to Daughter storage. After writing
// that, values can be used in eigenTrust algorithm's iterations.
type DaughterTrustWriter struct {
	log     *logger.Logger
	storage *daughters.Storage
	ep      reputationcommon.EpochProvider
}

func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error {
	w.log.Debug("writing received daughter's trusts",
		zap.Uint64("epoch", w.ep.Epoch()),
		zap.Stringer("trusting_peer", t.TrustingPeer()),
		zap.Stringer("trusted_peer", t.Peer()),
	)

	w.storage.Put(w.ep.Epoch(), t)
	return nil
}

func (w *DaughterTrustWriter) Close(context.Context) error {
	return nil
}

func (s *DaughterStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
	return &DaughterTrustWriter{
		log:     s.Log,
		storage: s.Storage,
		ep:      ep,
	}, nil
}
@ -1,124 +0,0 @@
|
|||
package intermediate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
|
||||
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
|
||||
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
|
||||
eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
reputationapi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RemoteProviderPrm groups the required parameters of the RemoteProvider's constructor.
|
||||
//
|
||||
// All values must comply with the requirements imposed on them.
|
||||
// Passing incorrect parameter values will result in constructor
|
||||
// failure (error or panic depending on the implementation).
|
||||
type RemoteProviderPrm struct {
|
||||
Key *ecdsa.PrivateKey
|
||||
Log *logger.Logger
|
||||
}
|
||||
|
||||
// NewRemoteProvider creates a new instance of the RemoteProvider.
|
||||
//
|
||||
// Panics if at least one value of the parameters is invalid.
|
||||
//
|
||||
// The created RemoteProvider does not require additional
|
||||
// initialization and is completely ready for work.
|
||||
func NewRemoteProvider(prm RemoteProviderPrm) *RemoteProvider {
|
||||
switch {
|
||||
case prm.Key == nil:
|
||||
common.PanicOnPrmValue("NetMapSource", prm.Key)
|
||||
case prm.Log == nil:
|
||||
common.PanicOnPrmValue("Logger", prm.Log)
|
||||
}
|
||||
|
||||
return &RemoteProvider{
|
||||
key: prm.Key,
|
||||
log: prm.Log,
|
||||
}
|
||||
}
|
||||
|
||||
// RemoteProvider is an implementation of the clientKeyRemoteProvider interface.
|
||||
type RemoteProvider struct {
|
||||
key *ecdsa.PrivateKey
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
func (rp RemoteProvider) WithClient(c coreclient.Client) reputationcommon.WriterProvider {
|
||||
return &TrustWriterProvider{
|
||||
client: c,
|
||||
key: rp.key,
|
||||
log: rp.log,
|
||||
}
|
||||
}
|
||||
|
||||
type TrustWriterProvider struct {
|
||||
client coreclient.Client
|
||||
key *ecdsa.PrivateKey
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
|
||||
iterInfo, ok := ep.(eigentrustcalc.EpochIterationInfo)
|
||||
if !ok {
|
||||
// TODO: #1164 think if this can be done without such limitation
|
||||
panic(ErrIncorrectContextPanicMsg)
|
||||
}
|
||||
|
||||
return &RemoteTrustWriter{
|
||||
iterInfo: iterInfo,
|
||||
client: twp.client,
|
||||
key: twp.key,
|
||||
log: twp.log,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type RemoteTrustWriter struct {
|
||||
iterInfo eigentrustcalc.EpochIterationInfo
|
||||
client coreclient.Client
|
||||
key *ecdsa.PrivateKey
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
// Write sends a trust value to a remote node via ReputationService.AnnounceIntermediateResult RPC.
|
||||
func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) error {
|
||||
epoch := rtp.iterInfo.Epoch()
|
||||
i := rtp.iterInfo.I()
|
||||
|
||||
rtp.log.Debug("announcing trust",
|
||||
zap.Uint64("epoch", epoch),
|
||||
zap.Uint32("iteration", i),
|
||||
zap.Stringer("trusting_peer", t.TrustingPeer()),
|
||||
zap.Stringer("trusted_peer", t.Peer()),
|
||||
)
|
||||
|
||||
var apiTrust reputationapi.Trust
|
||||
apiTrust.SetValue(t.Value().Float64())
|
||||
apiTrust.SetPeer(t.Peer())
|
||||
|
||||
var apiPeerToPeerTrust reputationapi.PeerToPeerTrust
|
||||
apiPeerToPeerTrust.SetTrustingPeer(t.TrustingPeer())
|
||||
apiPeerToPeerTrust.SetTrust(apiTrust)
|
||||
|
||||
var p internalclient.AnnounceIntermediatePrm
|
||||
|
||||
p.SetClient(rtp.client)
|
||||
p.SetEpoch(epoch)
|
||||
p.SetIteration(i)
|
||||
p.SetTrust(apiPeerToPeerTrust)
|
||||
|
||||
_, err := internalclient.AnnounceIntermediate(ctx, p)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (rtp *RemoteTrustWriter) Close(context.Context) error {
|
||||
return nil
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
package intermediate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
|
||||
consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
|
||||
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
)
|
||||
|
||||
// DaughterTrustIteratorProvider is an implementation of the
|
||||
// reputation/eigentrust/calculator's DaughterTrustIteratorProvider interface.
|
||||
type DaughterTrustIteratorProvider struct {
|
||||
DaughterStorage *daughters.Storage
|
||||
ConsumerStorage *consumerstorage.Storage
|
||||
}
|
||||
|
||||
// InitDaughterIterator returns an iterator over the received
|
||||
// local trusts for ctx.Epoch() epoch from daughter p.
|
||||
func (ip *DaughterTrustIteratorProvider) InitDaughterIterator(ctx eigentrustcalc.EpochIterationInfo,
|
||||
p apireputation.PeerID) (eigentrustcalc.TrustIterator, error) {
|
||||
epoch := ctx.Epoch()
|
||||
|
||||
daughterIterator, ok := ip.DaughterStorage.DaughterTrusts(epoch, p)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no data in %d epoch for daughter: %s", epoch, p)
|
||||
}
|
||||
|
||||
return daughterIterator, nil
|
||||
}
|
||||
|
||||
// InitAllDaughtersIterator returns an iterator over all
|
||||
// daughters of the current node(manager) and all local
|
||||
// trusts received from them for ctx.Epoch() epoch.
|
||||
func (ip *DaughterTrustIteratorProvider) InitAllDaughtersIterator(
|
||||
ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) {
|
||||
epoch := ctx.Epoch()
|
||||
|
||||
iter, ok := ip.DaughterStorage.AllDaughterTrusts(epoch)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no data in %d epoch for daughters", epoch)
|
||||
}
|
||||
|
||||
return iter, nil
|
||||
}
|
||||
|
||||
// InitConsumersIterator returns an iterator over all daughters
|
||||
// of the current node(manager) and all their consumers' local
|
||||
// trusts for ctx.Epoch() epoch and ctx.I() iteration.
|
||||
func (ip *DaughterTrustIteratorProvider) InitConsumersIterator(
|
||||
ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) {
|
||||
epoch, iter := ctx.Epoch(), ctx.I()
|
||||
|
||||
consumerIterator, ok := ip.ConsumerStorage.Consumers(epoch, iter)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no data for %d iteration in %d epoch for consumers's trusts",
|
||||
iter,
|
||||
epoch,
|
||||
)
|
||||
}
|
||||
|
||||
return consumerIterator, nil
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
)
|
||||
|
||||
type commonPrm struct {
|
||||
cli coreclient.Client
|
||||
}
|
||||
|
||||
// SetClient sets the base client for FrostFS API communication.
|
||||
//
|
||||
// Required parameter.
|
||||
func (x *commonPrm) SetClient(cli coreclient.Client) {
|
||||
x.cli = cli
|
||||
}
|
||||
|
||||
// AnnounceLocalPrm groups parameters of AnnounceLocal operation.
|
||||
type AnnounceLocalPrm struct {
|
||||
commonPrm
|
||||
|
||||
cliPrm client.PrmAnnounceLocalTrust
|
||||
}
|
||||
|
||||
// SetEpoch sets the epoch in which the trust was assessed.
|
||||
func (x *AnnounceLocalPrm) SetEpoch(epoch uint64) {
|
||||
x.cliPrm.SetEpoch(epoch)
|
||||
}
|
||||
|
||||
// SetTrusts sets a list of local trust values.
|
||||
func (x *AnnounceLocalPrm) SetTrusts(ts []reputation.Trust) {
|
||||
x.cliPrm.SetValues(ts)
|
||||
}
|
||||
|
||||
// AnnounceLocalRes groups the resulting values of AnnounceLocal operation.
|
||||
type AnnounceLocalRes struct{}
|
||||
|
||||
// AnnounceLocal sends estimations of local trust to the remote node.
|
||||
//
|
||||
// Client, context and key must be set.
|
||||
//
|
||||
// Returns any error which prevented the operation from completing correctly in error return.
|
||||
func AnnounceLocal(ctx context.Context, prm AnnounceLocalPrm) (res AnnounceLocalRes, err error) {
|
||||
var cliRes *client.ResAnnounceLocalTrust
|
||||
|
||||
cliRes, err = prm.cli.AnnounceLocalTrust(ctx, prm.cliPrm)
|
||||
if err == nil {
|
||||
// pull out an error from status
|
||||
err = apistatus.ErrFromStatus(cliRes.Status())
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// AnnounceIntermediatePrm groups parameters of AnnounceIntermediate operation.
|
||||
type AnnounceIntermediatePrm struct {
|
||||
commonPrm
|
||||
|
||||
cliPrm client.PrmAnnounceIntermediateTrust
|
||||
}
|
||||
|
||||
// SetEpoch sets the number of the epoch when the trust calculation's iteration was executed.
|
||||
func (x *AnnounceIntermediatePrm) SetEpoch(epoch uint64) {
|
||||
x.cliPrm.SetEpoch(epoch)
|
||||
}
|
||||
|
||||
// SetIteration sets the number of the iteration of the trust calculation algorithm.
|
||||
func (x *AnnounceIntermediatePrm) SetIteration(iter uint32) {
|
||||
x.cliPrm.SetIteration(iter)
|
||||
}
|
||||
|
||||
// SetTrust sets the current global trust value computed at the iteration.
|
||||
func (x *AnnounceIntermediatePrm) SetTrust(t reputation.PeerToPeerTrust) {
|
||||
x.cliPrm.SetCurrentValue(t)
|
||||
}
|
||||
|
||||
// AnnounceIntermediateRes groups the resulting values of AnnounceIntermediate operation.
|
||||
type AnnounceIntermediateRes struct{}
|
||||
|
||||
// AnnounceIntermediate sends the global trust value calculated at the specified iteration
|
||||
// and epoch to to the remote node.
|
||||
//
|
||||
// Client, context and key must be set.
|
||||
//
|
||||
// Returns any error which prevented the operation from completing correctly in error return.
|
||||
func AnnounceIntermediate(ctx context.Context, prm AnnounceIntermediatePrm) (res AnnounceIntermediateRes, err error) {
|
||||
var cliRes *client.ResAnnounceIntermediateTrust
|
||||
|
||||
cliRes, err = prm.cli.AnnounceIntermediateTrust(ctx, prm.cliPrm)
|
||||
if err == nil {
|
||||
// pull out an error from status
|
||||
err = apistatus.ErrFromStatus(cliRes.Status())
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@@ -1,11 +0,0 @@
// Package internal provides functionality for FrostFS Node Reputation system communication with FrostFS network.
// The base client for accessing remote nodes via FrostFS API is a FrostFS SDK Go API client.
// However, although it encapsulates a useful piece of business logic (e.g. the signature mechanism),
// the Reputation service does not fully use the client's flexible interface.
//
// In this regard, this package provides functions over base API client necessary for the application.
// This allows you to concentrate the entire spectrum of the client's use in one place (this will be convenient
// both when updating the base client and for evaluating the UX of SDK library). So, it is expected that all
// Reputation service packages will be limited to this package for the development of functionality requiring
// FrostFS API communication.
package internal
@ -1,112 +0,0 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
|
||||
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
|
||||
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
reputationapi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RemoteProviderPrm groups the required parameters of the RemoteProvider's constructor.
|
||||
//
|
||||
// All values must comply with the requirements imposed on them.
|
||||
// Passing incorrect parameter values will result in constructor
|
||||
// failure (error or panic depending on the implementation).
|
||||
type RemoteProviderPrm struct {
|
||||
Key *ecdsa.PrivateKey
|
||||
Log *logger.Logger
|
||||
}
|
||||
|
||||
// NewRemoteProvider creates a new instance of the RemoteProvider.
|
||||
//
|
||||
// Panics if at least one value of the parameters is invalid.
|
||||
//
|
||||
// The created RemoteProvider does not require additional
|
||||
// initialization and is completely ready for work.
|
||||
func NewRemoteProvider(prm RemoteProviderPrm) *RemoteProvider {
|
||||
switch {
|
||||
case prm.Key == nil:
|
||||
common.PanicOnPrmValue("NetMapSource", prm.Key)
|
||||
case prm.Log == nil:
|
||||
common.PanicOnPrmValue("Logger", prm.Log)
|
||||
}
|
||||
|
||||
return &RemoteProvider{
|
||||
key: prm.Key,
|
||||
log: prm.Log,
|
||||
}
|
||||
}
|
||||
|
||||
// RemoteProvider is an implementation of the clientKeyRemoteProvider interface.
|
||||
type RemoteProvider struct {
|
||||
key *ecdsa.PrivateKey
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
func (rp RemoteProvider) WithClient(c coreclient.Client) reputationcommon.WriterProvider {
|
||||
return &TrustWriterProvider{
|
||||
client: c,
|
||||
key: rp.key,
|
||||
log: rp.log,
|
||||
}
|
||||
}
|
||||
|
||||
type TrustWriterProvider struct {
|
||||
client coreclient.Client
|
||||
key *ecdsa.PrivateKey
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
|
||||
return &RemoteTrustWriter{
|
||||
ep: ep,
|
||||
client: twp.client,
|
||||
key: twp.key,
|
||||
log: twp.log,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type RemoteTrustWriter struct {
|
||||
ep reputationcommon.EpochProvider
|
||||
client coreclient.Client
|
||||
key *ecdsa.PrivateKey
|
||||
log *logger.Logger
|
||||
|
||||
buf []reputationapi.Trust
|
||||
}
|
||||
|
||||
func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error {
|
||||
var apiTrust reputationapi.Trust
|
||||
|
||||
apiTrust.SetValue(t.Value().Float64())
|
||||
apiTrust.SetPeer(t.Peer())
|
||||
|
||||
rtp.buf = append(rtp.buf, apiTrust)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rtp *RemoteTrustWriter) Close(ctx context.Context) error {
|
||||
epoch := rtp.ep.Epoch()
|
||||
|
||||
rtp.log.Debug("announcing trusts",
|
||||
zap.Uint64("epoch", epoch),
|
||||
)
|
||||
|
||||
var prm internalclient.AnnounceLocalPrm
|
||||
|
||||
prm.SetClient(rtp.client)
|
||||
prm.SetEpoch(epoch)
|
||||
prm.SetTrusts(rtp.buf)
|
||||
|
||||
_, err := internalclient.AnnounceLocal(ctx, prm)
|
||||
|
||||
return err
|
||||
}
|
|
@ -1,107 +0,0 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
|
||||
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
|
||||
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
|
||||
trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
|
||||
truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type TrustStorage struct {
|
||||
Log *logger.Logger
|
||||
|
||||
Storage *truststorage.Storage
|
||||
|
||||
NmSrc netmapcore.Source
|
||||
|
||||
LocalKey []byte
|
||||
}
|
||||
|
||||
func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) {
|
||||
epoch := ep.Epoch()
|
||||
|
||||
s.Log.Debug("initializing iterator over trusts",
|
||||
zap.Uint64("epoch", epoch),
|
||||
)
|
||||
|
||||
epochStorage, err := s.Storage.DataForEpoch(epoch)
|
||||
if err != nil && !errors.Is(err, truststorage.ErrNoPositiveTrust) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &TrustIterator{
|
||||
ep: ep,
|
||||
storage: s,
|
||||
epochStorage: epochStorage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type TrustIterator struct {
|
||||
ep reputationcommon.EpochProvider
|
||||
|
||||
storage *TrustStorage
|
||||
|
||||
epochStorage *truststorage.EpochTrustValueStorage
|
||||
}
|
||||
|
||||
func (it *TrustIterator) Iterate(h reputation.TrustHandler) error {
|
||||
if it.epochStorage != nil {
|
||||
err := it.epochStorage.Iterate(h)
|
||||
if !errors.Is(err, truststorage.ErrNoPositiveTrust) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
nm, err := it.storage.NmSrc.GetNetMapByEpoch(it.ep.Epoch())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// find out if local node is presented in netmap
|
||||
localIndex := -1
|
||||
|
||||
nmNodes := nm.Nodes()
|
||||
for i := range nmNodes {
|
||||
if bytes.Equal(nmNodes[i].PublicKey(), it.storage.LocalKey) {
|
||||
localIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
ln := len(nmNodes)
|
||||
if localIndex >= 0 && ln > 0 {
|
||||
ln--
|
||||
}
|
||||
|
||||
// calculate Pj http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Chapter 4.5.
|
||||
p := reputation.TrustOne.Div(reputation.TrustValueFromInt(ln))
|
||||
|
||||
for i := range nmNodes {
|
||||
if i == localIndex {
|
||||
continue
|
||||
}
|
||||
|
||||
var trusted, trusting apireputation.PeerID
|
||||
|
||||
trusted.SetPublicKey(nmNodes[i].PublicKey())
|
||||
trusting.SetPublicKey(it.storage.LocalKey)
|
||||
|
||||
trust := reputation.Trust{}
|
||||
trust.SetPeer(trusted)
|
||||
trust.SetValue(p)
|
||||
trust.SetTrustingPeer(trusting)
|
||||
|
||||
if err := h(trust); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -1,90 +0,0 @@
package ticker

import (
	"fmt"
	"sync"
)

// IterationHandler is a callback of a certain block advance.
type IterationHandler func()

// IterationsTicker represents a fixed tick number block timer.
//
// It can tick the blocks and perform certain actions
// on block time intervals.
type IterationsTicker struct {
	m sync.Mutex

	curr   uint64
	period uint64

	times uint64

	h IterationHandler
}

// NewIterationsTicker creates a new IterationsTicker.
//
// It guaranties that a handler would be called the
// specified amount of times in the specified amount
// of blocks. After the last meaningful Tick, IterationsTicker
// becomes no-op timer.
//
// Returns an error only if times is greater than totalBlocks.
func NewIterationsTicker(totalBlocks uint64, times uint64, h IterationHandler) (*IterationsTicker, error) {
	period := totalBlocks / times

	if period == 0 {
		return nil, fmt.Errorf("impossible to tick %d times in %d blocks",
			times, totalBlocks,
		)
	}

	var curr uint64

	// try to make handler calls as rare as possible
	if totalBlocks%times != 0 {
		extraBlocks := (period+1)*times - totalBlocks

		if period >= extraBlocks {
			curr = extraBlocks + (period-extraBlocks)/2
			period++
		}
	}

	return &IterationsTicker{
		curr:   curr,
		period: period,
		times:  times,
		h:      h,
	}, nil
}

// Tick ticks one block in the IterationsTicker.
//
// Returns `false` if the timer has finished its operations
// and there will be no more handler calls.
// Calling Tick after the returned `false` is safe, no-op
// and also returns `false`.
func (ft *IterationsTicker) Tick() bool {
	ft.m.Lock()
	defer ft.m.Unlock()

	if ft.times == 0 {
		return false
	}

	ft.curr++

	if ft.curr%ft.period == 0 {
		ft.h()

		ft.times--

		if ft.times == 0 {
			return false
		}
	}

	return true
}
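As a worked example of the scheduling above: with `totalBlocks = 20` and `times = 4`, the period is 20/4 = 5, so the handler fires on ticks 5, 10, 15 and 20; when the division is not exact (for instance 11 blocks and 6 calls) the constructor shifts the first call and widens the period to keep the calls as rare as possible. A small usage sketch, assuming the package is imported at the path shown in the diff:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
)

func main() {
	calls := 0

	// 4 handler calls spread over 20 block ticks: fires on ticks 5, 10, 15, 20.
	t, err := ticker.NewIterationsTicker(20, 4, func() { calls++ })
	if err != nil {
		panic(err)
	}

	for block := 1; block <= 20; block++ {
		if !t.Tick() {
			break // the ticker is exhausted after its last meaningful call
		}
	}

	fmt.Println("handler calls:", calls) // 4
}
```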
@ -1,118 +0,0 @@
|
|||
package ticker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFixedTimer_Tick(t *testing.T) {
|
||||
tests := [...]struct {
|
||||
duration uint64
|
||||
times uint64
|
||||
err error
|
||||
}{
|
||||
{
|
||||
duration: 20,
|
||||
times: 4,
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
duration: 6,
|
||||
times: 6,
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
duration: 10,
|
||||
times: 6,
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
duration: 5,
|
||||
times: 6,
|
||||
err: errors.New("impossible to tick 6 times in 5 blocks"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(fmt.Sprintf("duration:%d,times:%d", test.duration, test.times), func(t *testing.T) {
|
||||
counter := uint64(0)
|
||||
|
||||
timer, err := NewIterationsTicker(test.duration, test.times, func() {
|
||||
counter++
|
||||
})
|
||||
if test.err != nil {
|
||||
require.EqualError(t, err, test.err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < int(test.duration); i++ {
|
||||
if !timer.Tick() {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, false, timer.Tick())
|
||||
require.Equal(t, test.times, counter)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFixedTimer_RareCalls(t *testing.T) {
|
||||
tests := [...]struct {
|
||||
duration uint64
|
||||
times uint64
|
||||
firstCall uint64
|
||||
period uint64
|
||||
}{
|
||||
{
|
||||
duration: 11,
|
||||
times: 6,
|
||||
firstCall: 1,
|
||||
period: 2,
|
||||
},
|
||||
{
|
||||
duration: 11,
|
||||
times: 4,
|
||||
firstCall: 2,
|
||||
period: 3,
|
||||
},
|
||||
{
|
||||
duration: 20,
|
||||
times: 3,
|
||||
firstCall: 4,
|
||||
period: 7,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(fmt.Sprintf("duration:%d,times:%d", test.duration, test.times), func(t *testing.T) {
|
||||
var counter uint64
|
||||
|
||||
timer, err := NewIterationsTicker(test.duration, test.times, func() {
|
||||
counter++
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
checked := false
|
||||
|
||||
for i := 1; i <= int(test.duration); i++ {
|
||||
if !timer.Tick() {
|
||||
break
|
||||
}
|
||||
|
||||
if !checked && counter == 1 {
|
||||
require.Equal(t, test.firstCall, uint64(i))
|
||||
checked = true
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, false, timer.Tick())
|
||||
require.Equal(t, test.times, counter)
|
||||
})
|
||||
}
|
||||
}
|
|
@@ -1,43 +0,0 @@
package main

import (
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
)

type eigenTrustTickers struct {
	m sync.Mutex

	timers map[uint64]*ticker.IterationsTicker
}

func (e *eigenTrustTickers) addEpochTimer(epoch uint64, timer *ticker.IterationsTicker) {
	e.m.Lock()
	defer e.m.Unlock()

	e.timers[epoch] = timer
}

func (e *eigenTrustTickers) tick() {
	e.m.Lock()
	defer e.m.Unlock()

	for epoch, t := range e.timers {
		if !t.Tick() {
			delete(e.timers, epoch)
		}
	}
}

func tickBlockTimers(c *cfg) {
	c.cfgMorph.eigenTrustTicker.tick()
}

func newEigenTrustIterTimer(c *cfg) {
	c.cfgMorph.eigenTrustTicker = &eigenTrustTickers{
		// it is expected to have max 2 concurrent epoch
		// in normal mode work
		timers: make(map[uint64]*ticker.IterationsTicker, 2),
	}
}
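`newEigenTrustIterTimer` keeps one `IterationsTicker` per active epoch and `tickBlockTimers` advances all of them on every new side-chain block, dropping a ticker once it has made its last call. A self-contained mirror of that bookkeeping; the `tickers` type here is a stand-in for the unexported `eigenTrustTickers` above:

```go
package main

import (
	"fmt"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
)

type tickers struct {
	mu     sync.Mutex
	timers map[uint64]*ticker.IterationsTicker
}

func (t *tickers) add(epoch uint64, it *ticker.IterationsTicker) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.timers[epoch] = it
}

// tick advances every registered per-epoch ticker by one block and
// forgets the ones that have made their last handler call.
func (t *tickers) tick() {
	t.mu.Lock()
	defer t.mu.Unlock()
	for epoch, it := range t.timers {
		if !it.Tick() {
			delete(t.timers, epoch)
		}
	}
}

func main() {
	ts := &tickers{timers: make(map[uint64]*ticker.IterationsTicker)}

	it, err := ticker.NewIterationsTicker(10, 2, func() { fmt.Println("EigenTrust iteration") })
	if err != nil {
		panic(err)
	}
	ts.add(42, it)

	for block := 0; block < 10; block++ {
		ts.tick() // would be called from the new-block notification handler
	}

	fmt.Println("remaining tickers:", len(ts.timers)) // 0
}
```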
Some files were not shown because too many files have changed in this diff.