Compare commits


No commits in common. "master" and "fix/cli_apemngr" have entirely different histories.

292 changed files with 3048 additions and 3007 deletions

View file

@ -1,28 +0,0 @@
name: OCI image
on:
push:
workflow_dispatch:
jobs:
image:
name: Build container images
runs-on: docker
container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
steps:
- name: Clone git repo
uses: actions/checkout@v3
- name: Build OCI image
run: make images
- name: Push image to OCI registry
run: |
echo "$REGISTRY_PASSWORD" \
| docker login --username truecloudlab --password-stdin git.frostfs.info
make push-images
if: >-
startsWith(github.ref, 'refs/tags/v') &&
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
env:
REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

View file

@ -18,7 +18,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.23.5'
go-version: '1.23'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@ -89,7 +89,5 @@ linters:
- protogetter
- intrange
- tenv
- unconvert
- unparam
disable-all: true
fast: false

View file

@ -9,30 +9,6 @@ Changelog for FrostFS Node
### Removed
### Updated
## [v0.44.0] - 2024-11-25 - Rongbuk
### Added
- Allow to prioritize nodes during GET traversal via attributes (#1439)
- Add metrics for the frostfsid cache (#1464)
- Customize constant attributes attached to every tracing span (#1488)
- Manage additional keys in the `frostfsid` contract (#1505)
- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519)
### Changed
- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396)
- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515)
### Fixed
- Fix EC object search (#1408)
- Fix EC object put when one of the nodes is unavailable (#1427)
### Removed
- Drop most of the eACL-related code (#1425)
- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483)
### Upgrading from v0.43.0
The metabase schema has changed completely, resync is required.
## [v0.42.0]
### Added

View file

@ -1,3 +0,0 @@
.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
.forgejo/.* @potyarkin
Makefile @potyarkin

View file

@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.62.2
LINT_VERSION ?= 1.62.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
@ -139,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
# Push FrostFS components' docker image to the registry
push-image-%:
@echo "⇒ Publish FrostFS $* docker image "
@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
# Push all Docker images to the registry
.PHONY: push-images
push-images: push-image-storage push-image-ir push-image-cli push-image-adm
# Run `make %` in Golang container
docker/%:
docker run --rm -t \
@ -279,12 +270,10 @@ env-up: all
echo "Frostfs contracts not found"; exit 1; \
fi
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
--storage-wallet ./dev/storage/wallet01.json \
--storage-wallet ./dev/storage/wallet02.json \
--storage-wallet ./dev/storage/wallet03.json \
--storage-wallet ./dev/storage/wallet04.json
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
make locode-download; \
fi

View file

@ -1 +1 @@
v0.44.0
v0.42.0

View file

@ -28,7 +28,6 @@ const (
var (
errNoPathsFound = errors.New("no metabase paths found")
errNoMorphEndpointsFound = errors.New("no morph endpoints found")
errUpgradeFailed = errors.New("upgrade failed")
)
var UpgradeCmd = &cobra.Command{
@ -92,19 +91,14 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil {
return err
}
allSuccess := true
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
allSuccess = false
}
}
if allSuccess {
return nil
}
return errUpgradeFailed
return nil
}
func getMetabasePaths(appCfg *config.Config) ([]string, error) {
@ -141,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
if err != nil {
return nil, fmt.Errorf("resolve container contract hash: %w", err)
}
cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
if err != nil {
return nil, fmt.Errorf("create morph container client: %w", err)
}

View file

@ -8,7 +8,7 @@ import (
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@ -200,7 +200,7 @@ func listRuleChains(cmd *cobra.Command, _ []string) {
func setAdmin(cmd *cobra.Command, _ []string) {
s, _ := cmd.Flags().GetString(addrAdminFlag)
addr, err := address.StringToUint160(s)
addr, err := util.Uint160DecodeStringLE(s)
commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
pci, ac := newPolicyContractInterface(cmd)
h, vub, err := pci.SetAdmin(addr)
@ -214,7 +214,7 @@ func getAdmin(cmd *cobra.Command, _ []string) {
pci, _ := newPolicyContractReaderInterface(cmd)
addr, err := pci.GetAdmin()
commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
cmd.Println(address.Uint160ToString(addr))
cmd.Println(addr.StringLE())
}
func listTargets(cmd *cobra.Command, _ []string) {

View file

@ -53,15 +53,16 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
}
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
inv := invoker.New(c, nil)
var ch util.Uint160
r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
invokerAdapter := &invokerAdapter{
@ -73,7 +74,7 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
}
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)

View file

@ -51,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
nmHash util.Uint160
)
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return err
}

View file

@ -26,7 +26,7 @@ import (
const forceConfigSet = "force"
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}

View file

@ -76,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err)
}
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
@ -157,7 +157,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
}
func listContainers(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}

View file

@ -36,7 +36,7 @@ type contractDumpInfo struct {
}
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}

View file

@ -1,6 +1,7 @@
package frostfsid
import (
"errors"
"fmt"
"math/big"
"sort"
@ -253,7 +254,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaces()
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
namespaces, err := frostfsidclient.ParseNamespaces(items)
@ -305,7 +306,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
@ -319,7 +320,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
@ -365,7 +366,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
groups, err := frostfsidclient.ParseGroups(items)
commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@ -415,7 +416,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@ -488,28 +489,32 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
}
f.bw.Reset()
if len(f.wCtx.SentTxs) == 0 {
return nil, errors.New("no transactions to wait")
}
f.wCtx.Command.Println("Waiting for transactions to persist...")
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
}
func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
var shouldStop bool
res := make([]stackitem.Item, 0)
for !shouldStop {
items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
items, err := inv.TraverseIterator(sessionID, iter, batchSize)
if err != nil {
return nil, err
}
res = append(res, items...)
shouldStop = len(items) < iteratorBatchSize
shouldStop = len(items) < batchSize
}
return res, nil
}
func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)

View file

@ -1,12 +1,59 @@
package frostfsid
import (
"encoding/hex"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
)
func TestFrostfsIDConfig(t *testing.T) {
pks := make([]*keys.PrivateKey, 4)
for i := range pks {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
pks[i] = pk
}
fmts := []string{
pks[0].GetScriptHash().StringLE(),
address.Uint160ToString(pks[1].GetScriptHash()),
hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
hex.EncodeToString(pks[3].PublicKey().Bytes()),
}
for i := range fmts {
v := viper.New()
v.Set("frostfsid.admin", fmts[i])
actual, found, err := helper.GetFrostfsIDAdmin(v)
require.NoError(t, err)
require.True(t, found)
require.Equal(t, pks[i].GetScriptHash(), actual)
}
t.Run("bad key", func(t *testing.T) {
v := viper.New()
v.Set("frostfsid.admin", "abc")
_, found, err := helper.GetFrostfsIDAdmin(v)
require.Error(t, err)
require.True(t, found)
})
t.Run("missing key", func(t *testing.T) {
v := viper.New()
_, found, err := helper.GetFrostfsIDAdmin(v)
require.NoError(t, err)
require.False(t, found)
})
}
func TestNamespaceRegexp(t *testing.T) {
for _, tc := range []struct {
name string

View file

@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
@ -140,29 +141,60 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
}
func generateStorageCreds(cmd *cobra.Command, _ []string) error {
walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
w, err := wallet.NewWallet(walletPath)
if err != nil {
return fmt.Errorf("create wallet: %w", err)
}
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
password, err := config.GetStoragePassword(viper.GetViper(), label)
if err != nil {
return fmt.Errorf("can't fetch password: %w", err)
}
if label == "" {
label = constants.SingleAccountName
}
if err := w.CreateAccount(label, password); err != nil {
return fmt.Errorf("can't create account: %w", err)
}
return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
return refillGas(cmd, storageGasConfigFlag, true)
}
func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
// storage wallet path is not part of the config
storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
// wallet address is not part of the config
walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
var gasReceiver util.Uint160
if len(walletAddress) != 0 {
gasReceiver, err = address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
} else {
if storageWalletPath == "" {
return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
}
var w *wallet.Wallet
if createWallet {
w, err = wallet.NewWallet(storageWalletPath)
} else {
w, err = wallet.NewWalletFromFile(storageWalletPath)
}
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
if createWallet {
var password string
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
password, err := config.GetStoragePassword(viper.GetViper(), label)
if err != nil {
return fmt.Errorf("can't fetch password: %w", err)
}
if label == "" {
label = constants.SingleAccountName
}
if err := w.CreateAccount(label, password); err != nil {
return fmt.Errorf("can't create account: %w", err)
}
}
gasReceiver = w.Accounts[0].Contract.ScriptHash()
}
gasStr := viper.GetString(gasFlag)
gasAmount, err := helper.ParseGASAmount(gasStr)
@ -176,11 +208,9 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160)
}
bw := io.NewBufBinWriter()
for _, gasReceiver := range gasReceivers {
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
}
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
if bw.Err != nil {
return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
}

View file

@ -1,12 +1,7 @@
package generate
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@ -38,27 +33,7 @@ var (
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
},
RunE: func(cmd *cobra.Command, _ []string) error {
storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
var gasReceivers []util.Uint160
for _, walletAddress := range walletAddresses {
addr, err := address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
gasReceivers = append(gasReceivers, addr)
}
for _, storageWalletPath := range storageWalletPaths {
w, err := wallet.NewWalletFromFile(storageWalletPath)
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
}
return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
},
}
GenerateAlphabetCmd = &cobra.Command{
@ -75,10 +50,10 @@ var (
func initRefillGasCmd() {
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
}
func initGenerateStorageCmd() {

View file

@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
}
if method != constants.UpdateMethodName || err == nil && !found {
h, found, err = getFrostfsIDAdmin(viper.GetViper())
h, found, err = GetFrostfsIDAdmin(viper.GetViper())
}
if err != nil {
return nil, err

View file

@ -11,7 +11,7 @@ import (
const frostfsIDAdminConfigKey = "frostfsid.admin"
func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
admin := v.GetString(frostfsIDAdminConfigKey)
if admin == "" {
return util.Uint160{}, false, nil

View file

@ -1,53 +0,0 @@
package helper
import (
"encoding/hex"
"testing"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
)
func TestFrostfsIDConfig(t *testing.T) {
pks := make([]*keys.PrivateKey, 4)
for i := range pks {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
pks[i] = pk
}
fmts := []string{
pks[0].GetScriptHash().StringLE(),
address.Uint160ToString(pks[1].GetScriptHash()),
hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
hex.EncodeToString(pks[3].PublicKey().Bytes()),
}
for i := range fmts {
v := viper.New()
v.Set("frostfsid.admin", fmts[i])
actual, found, err := getFrostfsIDAdmin(v)
require.NoError(t, err)
require.True(t, found)
require.Equal(t, pks[i].GetScriptHash(), actual)
}
t.Run("bad key", func(t *testing.T) {
v := viper.New()
v.Set("frostfsid.admin", "abc")
_, found, err := getFrostfsIDAdmin(v)
require.Error(t, err)
require.True(t, found)
})
t.Run("missing key", func(t *testing.T) {
v := viper.New()
_, found, err := getFrostfsIDAdmin(v)
require.NoError(t, err)
require.False(t, found)
})
}

View file

@ -134,12 +134,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex
return nil, err
}
accounts, err := getSingleAccounts(wallets)
accounts, err := createWalletAccounts(wallets)
if err != nil {
return nil, err
}
cliCtx, err := defaultClientContext(c, committeeAcc)
cliCtx, err := DefaultClientContext(c, committeeAcc)
if err != nil {
return nil, fmt.Errorf("client context: %w", err)
}
@ -191,7 +191,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet)
}
c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
} else {
c, err = NewRemoteClient(v)
c, err = GetN3Client(v)
}
if err != nil {
return nil, fmt.Errorf("can't create N3 client: %w", err)
@ -211,7 +211,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
return ctrPath, nil
}
func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
accounts := make([]*wallet.Account, len(wallets))
for i, w := range wallets {
acc, err := GetWalletAccount(w, constants.SingleAccountName)

View file

@ -58,59 +58,17 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
return nil, err
}
go bc.Run()
accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets)
if err != nil {
return nil, err
}
if cmd.Name() != "init" {
if err := restoreDump(bc, dumpPath); err != nil {
return nil, fmt.Errorf("restore dump: %w", err)
}
}
return &LocalClient{
bc: bc,
dumpPath: dumpPath,
accounts: accounts,
}, nil
}
func restoreDump(bc *core.Blockchain, dumpPath string) error {
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
if err != nil {
return fmt.Errorf("can't open local dump: %w", err)
}
defer f.Close()
r := io.NewBinReaderFromIO(f)
var skip uint32
if bc.BlockHeight() != 0 {
skip = bc.BlockHeight() + 1
}
count := r.ReadU32LE() - skip
if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
return err
}
return nil
}
func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
accounts := make([]*wallet.Account, len(wallets))
for i := range accounts {
acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName)
if err != nil {
return nil, err
}
accounts[i] = acc
}
indexMap := make(map[string]int)
for i, pub := range cfg.StandbyCommittee {
for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
indexMap[pub] = i
}
@ -119,12 +77,37 @@ func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet
pj := accounts[j].PrivateKey().PublicKey().Bytes()
return indexMap[string(pi)] < indexMap[string(pj)]
})
sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
})
m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
return accounts[:m], nil
go bc.Run()
if cmd.Name() != "init" {
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
if err != nil {
return nil, fmt.Errorf("can't open local dump: %w", err)
}
defer f.Close()
r := io.NewBinReaderFromIO(f)
var skip uint32
if bc.BlockHeight() != 0 {
skip = bc.BlockHeight() + 1
}
count := r.ReadU32LE() - skip
if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
return nil, fmt.Errorf("can't restore local dump: %w", err)
}
}
return &LocalClient{
bc: bc,
dumpPath: dumpPath,
accounts: accounts[:m],
}, nil
}
func (l *LocalClient) GetBlockCount() (uint32, error) {
@ -145,6 +128,11 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
return &a, nil
}
func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) {
// not used by `morph init` command
panic("unexpected call")
}
// InvokeFunction is implemented via `InvokeScript`.
func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
var err error
@ -308,7 +296,13 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer)
}
func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
tx = tx.Copy()
// We need to test that transaction was formed correctly to catch as many errors as we can.
bs := tx.Bytes()
_, err := transaction.NewTransactionFromBytes(bs)
if err != nil {
return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
}
l.transactions = append(l.transactions, tx)
return tx.Hash(), nil
}

View file

@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
@ -24,10 +25,15 @@ import (
// Client represents N3 client interface capable of test-invoking scripts
// and sending signed transactions to chain.
type Client interface {
actor.RPCActor
invoker.RPCInvoke
GetBlockCount() (uint32, error)
GetNativeContracts() ([]state.Contract, error)
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
GetVersion() (*result.Version, error)
SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
GetCommittee() (keys.PublicKeys, error)
CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
}
type HashVUBPair struct {
@ -42,7 +48,7 @@ type ClientContext struct {
SentTxs []HashVUBPair
}
func NewRemoteClient(v *viper.Viper) (Client, error) {
func GetN3Client(v *viper.Viper) (Client, error) {
// number of opened connections
// by neo-go client per one host
const (
@ -82,14 +88,8 @@ func NewRemoteClient(v *viper.Viper) (Client, error) {
return c, nil
}
func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
commAct, err := actor.New(c, []actor.SignerAccount{{
Signer: transaction.Signer{
Account: committeeAcc.Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: committeeAcc,
}})
func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
commAct, err := NewActor(c, committeeAcc)
if err != nil {
return nil, err
}

View file

@ -15,8 +15,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper"
@ -85,6 +87,16 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
return wallets, nil
}
func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
return actor.New(c, []actor.SignerAccount{{
Signer: transaction.Signer{
Account: committeeAcc.Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: committeeAcc,
}})
}
func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
if err != nil {

View file

@ -13,7 +13,7 @@ import (
)
func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)

View file

@ -13,7 +13,7 @@ import (
func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
v := viper.GetViper()
c, err := helper.NewRemoteClient(v)
c, err := helper.GetN3Client(v)
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)
@ -26,7 +26,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
}
func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
inv := invoker.New(c, nil)

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math/big"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
addr, _ := cmd.Flags().GetString(walletAccountFlag)
if addr != "" {
if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@ -73,16 +73,23 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return err
}
till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
if till <= 0 {
return errInvalidNotaryDepositLifetime
till := int64(defaultNotaryDepositLifetime)
tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
if err != nil {
return err
}
if tillStr != "" {
till, err = strconv.ParseInt(tillStr, 10, 64)
if err != nil || till <= 0 {
return errInvalidNotaryDepositLifetime
}
}
return transferGas(cmd, acc, accHash, gasAmount, till)
}
func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return err
}

View file

@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
}
func init() {

View file

@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
}
func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
inv := invoker.New(c, nil)

View file

@ -20,32 +20,23 @@ const (
accountAddressFlag = "account"
)
func parseAddresses(cmd *cobra.Command) []util.Uint160 {
var addrs []util.Uint160
accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
for _, acc := range accs {
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
addrs = append(addrs, addr)
}
return addrs
}
func addProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "addAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "addAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func removeProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "removeAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "removeAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err
}
bw := io.NewBufBinWriter()
for _, addr := range addrs {
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
}
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err

View file

@ -29,15 +29,13 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}

View file

@ -11,7 +11,6 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/template"
@ -106,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
@ -411,7 +410,8 @@ func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := slices.Clone(rpc)
shuffled := make([]string, len(rpc))
copy(shuffled, rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {

View file

@ -9,6 +9,7 @@ import (
"io"
"os"
"slices"
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
@ -77,31 +78,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
slices.SortFunc(list, func(lhs, rhs cid.ID) int {
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
sort.Slice(list, func(i, j int) bool {
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
return strings.Compare(lhs, rhs) < 0
})
return list
}
func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
cliPrm := &client.PrmContainerListStream{
XHeaders: prm.XHeaders,
OwnerID: prm.OwnerID,
Session: prm.Session,
}
rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
if err != nil {
return fmt.Errorf("init container list: %w", err)
}
err = rdr.Iterate(processCnr)
if err != nil {
return fmt.Errorf("read container list: %w", err)
}
return
}
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client

View file

@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" {
err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else {
fmt.Print("\n")

View file

@ -6,11 +6,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// flags of list command.
@ -54,60 +51,44 @@ var listContainersCmd = &cobra.Command{
var prm internalclient.ListContainersPrm
prm.SetClient(cli)
prm.OwnerID = idUser
prm.Account = idUser
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
var containerIDs []cid.ID
err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
printContainer(cmd, prmGet, id)
return false
})
if err == nil {
return
}
if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
containerIDs = res.SortedIDList()
} else {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
containerIDs := res.SortedIDList()
for _, cnrID := range containerIDs {
printContainer(cmd, prmGet, cnrID)
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(cnrID.String())
continue
}
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
continue
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
continue
}
cmd.Println(cnrID.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
}
},
}
func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(id.String())
return
}
prmGet.ClientParams.ContainerID = &id
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
return
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
return
}
cmd.Println(id.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
}
func initContainerListContainersCmd() {
commonflags.Init(listContainersCmd)

View file

@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
nodes map[string]netmap.NodeInfo
}
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{
cmd: cmd,
nodes: map[string]netmap.NodeInfo{},
}
}, nil
}
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@ -246,7 +246,8 @@ var policyPlaygroundCmd = &cobra.Command{
Long: `A REPL for testing placement policies.
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
Run: func(cmd *cobra.Command, _ []string) {
repl := newPolicyPlaygroundREPL(cmd)
repl, err := newPolicyPlaygroundREPL(cmd)
commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},
}

View file

@ -0,0 +1,56 @@
package control
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
const ignoreErrorsFlag = "no-errors"
var evacuateShardCmd = &cobra.Command{
Use: "evacuate",
Short: "Evacuate objects from shard",
Long: "Evacuate objects from shard to other shards",
Run: evacuateShard,
Deprecated: "use frostfs-cli control shards evacuation start",
}
func evacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.EvacuateShardResponse
var err error
err = cli.ExecRaw(func(client *client.Client) error {
resp, err = control.EvacuateShard(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Shard has successfully been evacuated.")
}
func initControlEvacuateShardCmd() {
initControlFlags(evacuateShardCmd)
flags := evacuateShardCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}

View file

@ -17,11 +17,10 @@ import (
)
const (
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
repOneOnlyFlag = "rep-one-only"
ignoreErrorsFlag = "no-errors"
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
repOneOnlyFlag = "rep-one-only"
containerWorkerCountFlag = "container-worker-count"
objectWorkerCountFlag = "object-worker-count"

View file

@ -13,6 +13,7 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
@ -22,6 +23,7 @@ func initControlShardsCmd() {
initControlShardsListCmd()
initControlSetShardModeCmd()
initControlEvacuateShardCmd()
initControlEvacuationShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()

View file

@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
@ -41,9 +42,7 @@ func initObjectHashCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
_ = objectHashCmd.MarkFlagRequired("range")
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
}
@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
tz := typ == hashTz
fullHash := len(ranges) == 0
if fullHash {
var headPrm internalclient.HeadObjectPrm
headPrm.SetClient(cli)
Prepare(cmd, &headPrm)
headPrm.SetAddress(objAddr)
// get hash of full payload through HEAD (may be user can do it through dedicated command?)
res, err := internalclient.HeadObject(cmd.Context(), headPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
var cs checksum.Checksum
var csSet bool
if tz {
cs, csSet = res.Header().PayloadHomomorphicHash()
} else {
cs, csSet = res.Header().PayloadChecksum()
}
if csSet {
cmd.Println(hex.EncodeToString(cs.Value()))
} else {
cmd.Println("Missing checksum in object header.")
}
return
}
var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm)
@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
hashPrm.SetSalt(salt)
hashPrm.SetRanges(ranges)
if typ == hashTz {
if tz {
hashPrm.TZ()
}

View file

@ -1,12 +1,15 @@
package object
import (
"bytes"
"cmp"
"context"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"slices"
"sync"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@ -504,6 +507,7 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
}
func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
normilizeObjectNodesResult(objects, result)
if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
printObjectNodesAsJSON(cmd, objID, objects, result)
} else {
@ -511,6 +515,34 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
}
}
func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
slices.SortFunc(objects, func(lhs, rhs phyObject) int {
if lhs.ecHeader == nil && rhs.ecHeader == nil {
return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
}
if lhs.ecHeader == nil {
return -1
}
if rhs.ecHeader == nil {
return 1
}
if lhs.ecHeader.parent == rhs.ecHeader.parent {
return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
}
return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
})
for _, obj := range objects {
op := result.placements[obj.objectID]
slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
result.placements[obj.objectID] = op
}
}
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))

View file

@ -46,7 +46,7 @@ func initObjectPatchCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
@ -99,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
}
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
if err != nil {
return nil, err
var rawAttrs []string
raw := cmd.Flag(newAttrsFlagName).Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes

View file

@ -50,7 +50,7 @@ func initObjectPutCmd() {
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
}
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
if err != nil {
return nil, err
var rawAttrs []string
raw := cmd.Flag("attributes").Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes

View file

@ -38,7 +38,7 @@ func initObjectRangeCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
flags.String("range", "", "Range to take data from in the form offset:length")
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc)
}
@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
}
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
vs, err := cmd.Flags().GetStringSlice("range")
if len(vs) == 0 || err != nil {
return nil, err
v := cmd.Flag("range").Value.String()
if len(v) == 0 {
return nil, nil
}
vs := strings.Split(v, ",")
rs := make([]objectSDK.Range, len(vs))
for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep)
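
The getRangeList loop is cut off in this hunk; after strings.Cut splits an "offset:length" pair, both halves are parsed as unsigned integers into an objectSDK.Range. A minimal hedged sketch of that step as a standalone helper (helper name, separator literal and error wording are assumptions; objectSDK.Range with SetOffset/SetLength comes from the frostfs-sdk-go object package, strconv and fmt are standard library):

	// parseRange is a hypothetical helper mirroring one iteration of the loop above.
	func parseRange(s string) (objectSDK.Range, error) {
		var r objectSDK.Range
		before, after, found := strings.Cut(s, ":")
		if !found {
			return r, fmt.Errorf("invalid range specifier: %s", s)
		}
		offset, err := strconv.ParseUint(before, 10, 64)
		if err != nil {
			return r, fmt.Errorf("invalid offset %q: %w", before, err)
		}
		length, err := strconv.ParseUint(after, 10, 64)
		if err != nil {
			return r, fmt.Errorf("invalid length %q: %w", after, err)
		}
		r.SetOffset(offset)
		r.SetLength(length)
		return r, nil
	}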

View file

@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
zap.String("error", err.Error()),
)
} else {
c.init(ctx)

View file

@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
innerRing.Stop(ctx)
if err := metricsCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
zap.String("error", err.Error()),
)
}
if err := pprofCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
zap.String("error", err.Error()),
)
}

View file

@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err))
zap.String("error", err.Error()))
return
}

View file

@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path
parser := parentBucket.NextParser
buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
if err != nil {
return err
}
for item := range buffer {
if item.err != nil {
@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
}
bucket := item.val
var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil {
return err
@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel()
// Check the current bucket's nested buckets if exist
bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range bucketsBuffer {
if item.err != nil {
@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
b := item.val
var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil {
return false, err
@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
// Check the current bucket's nested records if exist
recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range recordsBuffer {
if item.err != nil {
@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
r := item.val
var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil {
return false, err

View file

@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T,
) <-chan Item[T] {
) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize)
go func() {
@ -77,13 +77,13 @@ func load[T any](
}
}()
return buffer
return buffer, nil
}
func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Bucket] {
buffer := load(
) (<-chan Item[*Bucket], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value == nil
@ -98,14 +98,17 @@ func LoadBuckets(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Record] {
buffer := load(
) (<-chan Item[*Record], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value != nil
@ -121,8 +124,11 @@ func LoadRecords(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
buffer := load(
buffer, err := load(
ctx, db, path, 1,
nil,
func(_, value []byte) []byte { return value },
)
if err != nil {
return false, err
}
x, ok := <-buffer
if !ok {
return false, nil
}
if x.err != nil {
return false, x.err
return false, err
}
if x.val != nil {
return false, nil
return false, err
}
return true, nil
}

View file

@ -62,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx)
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
if err != nil {
return err
}
v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() {
@ -70,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer {
if item.err != nil {
v.ui.stopOnError(item.err)
v.ui.stopOnError(err)
break
}
record := item.val
var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil {
v.ui.stopOnError(err)

View file

@ -19,7 +19,6 @@ func initAPEManagerService(c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
c.cfgMorph.client,
apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)

View file

@ -591,6 +591,8 @@ type cfgMorph struct {
client *client.Client
notaryEnabled bool
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
@ -606,10 +608,9 @@ type cfgAccounting struct {
type cfgContainer struct {
scriptHash neogoutil.Uint160
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
containerBatchSize uint32
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
}
type cfgFrostfsID struct {
@ -698,7 +699,8 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
@ -852,8 +854,8 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
}
func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
return cfgGRPC{
maxChunkSize: maxChunkSize,
@ -1058,7 +1060,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh
}
func (c *cfg) loggerPrm() *logger.Prm {
func (c *cfg) loggerPrm() (*logger.Prm, error) {
// check if it has been inited before
if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm)
@ -1077,7 +1079,7 @@ func (c *cfg) loggerPrm() *logger.Prm {
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
return c.dynamicConfiguration.logger, nil
}
func (c *cfg) LocalAddress() network.AddressGroup {
@ -1119,7 +1121,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.Error(err),
zap.String("error", err.Error()),
)
} else {
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
@ -1146,7 +1148,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
}
@ -1209,7 +1211,7 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
zap.Error(err))
zap.String("error", err.Error()))
return
}
@ -1219,9 +1221,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo
ni.SetStatus(state)
stateSetter(&ni)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
@ -1231,7 +1233,9 @@ func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) er
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, netmap.Online)
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
}
// bootstrap calls bootstrapWithState with:
@ -1242,7 +1246,9 @@ func (c *cfg) bootstrap(ctx context.Context) error {
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, netmap.Maintenance)
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
}
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
@ -1333,7 +1339,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// Logger
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm)
@ -1456,7 +1466,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
return container.NewInfoProvider(func() (container.Source, error) {
c.initMorphComponents(ctx)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
if err != nil {
return nil, err
}

View file

@ -1,7 +1,6 @@
package config
import (
"slices"
"strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used
// to provide default values.
func (x *Config) SetDefault(from *Config) {
x.defaultPath = slices.Clone(from.path)
x.defaultPath = make([]string, len(from.path))
copy(x.defaultPath, from.path)
}
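
Editorial note: `slices.Clone` (standard library, Go 1.21+) behaves the same as the manual `make`-plus-`copy` shown in this hunk; a small self-contained sketch of that equivalence:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	path := []string{"storage", "shard", "0"}

	// slices.Clone allocates a new backing array, so the clone is independent
	// of the original, exactly like make([]string, len(path)) followed by copy.
	cloned := slices.Clone(path)
	cloned[0] = "changed"

	fmt.Println(path)   // [storage shard 0]
	fmt.Println(cloned) // [changed shard 0]
}
```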

View file

@ -1,27 +0,0 @@
package containerconfig
import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
const (
subsection = "container"
listStreamSubsection = "list_stream"
// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
ContainerBatchSizeDefault = 1000
)
// ContainerBatchSize returns the value of "batch_size" config parameter
// from "list_stream" subsection of "container" section.
//
// Returns ContainerBatchSizeDefault if the value is missing or if
// the value is not a positive integer.
func ContainerBatchSize(c *config.Config) uint32 {
if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
return ContainerBatchSizeDefault
}
size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
if size == 0 {
return ContainerBatchSizeDefault
}
return size
}

View file

@ -1,27 +0,0 @@
package containerconfig_test
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestContainerSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
})
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}

View file

@ -41,10 +41,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
c.Sub(si),
)
if sc.Mode() == mode.Disabled {
continue
}
// Path for the blobstor can't be present in the default section, because different shards
// must have different paths, so if it is missing, the shard is not here.
// At the same time checking for "blobstor" section doesn't work properly
@ -54,6 +50,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
}
(*config.Config)(sc).SetDefault(def)
if sc.Mode() == mode.Disabled {
continue
}
if err := f(sc); err != nil {
return err
}

View file

@ -18,22 +18,6 @@ import (
"github.com/stretchr/testify/require"
)
func TestIterateShards(t *testing.T) {
fileConfigTest := func(c *config.Config) {
var res []string
require.NoError(t,
engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
res = append(res, sc.Metabase().Path())
return nil
}))
require.Equal(t, []string{"abc", "xyz"}, res)
}
const cfgDir = "./testdata/shards"
configtest.ForEachFileType(cfgDir, fileConfigTest)
configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
}
func TestEngineSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()

View file

@ -1,3 +0,0 @@
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
FROSTFS_STORAGE_SHARD_1_MODE=disabled
FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz

View file

@ -1,13 +0,0 @@
{
"storage.shard": {
"0": {
"metabase.path": "abc"
},
"1": {
"mode": "disabled"
},
"2": {
"metabase.path": "xyz"
}
}
}

View file

@ -1,7 +0,0 @@
storage.shard:
0:
metabase.path: abc
1:
mode: disabled
2:
metabase.path: xyz

View file

@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
//
// Returns PermDefault if the value is not a positive number.
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
p := config.UintSafe(l.cfg, "perm")
p := config.UintSafe((*config.Config)(l.cfg), "perm")
if p == 0 {
p = PermDefault
}
@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
//
// Returns false if the value is not a boolean.
func (l PersistentPolicyRulesConfig) NoSync() bool {
return config.BoolSafe(l.cfg, "no_sync")
return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
}
// CompatibilityMode returns true if need to run node in compatibility with previous versions mode.

View file

@ -5,7 +5,6 @@ import (
"context"
"net"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
@ -29,7 +28,7 @@ import (
func initContainerService(_ context.Context, c *cfg) {
// container wrapper that tries to invoke notary
// requests if chain is configured so
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
fatalOnErr(err)
c.shared.cnrClient = wrap
@ -43,12 +42,11 @@ func initContainerService(_ context.Context, c *cfg) {
fatalOnErr(err)
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
if cacheSize > 0 {
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
}
c.shared.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
@ -58,9 +56,7 @@ func initContainerService(_ context.Context, c *cfg) {
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
containerService.NewSplitterService(
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
),
)
service = containerService.NewAuditService(service, c.log, c.audit)
@ -222,7 +218,6 @@ type morphContainerReader struct {
lister interface {
ContainersOf(*user.ID) ([]cid.ID, error)
IterateContainersOf(*user.ID, func(cid.ID) error) error
}
}
@ -238,10 +233,6 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(id)
}
func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error {
return x.lister.IterateContainersOf(id, processCID)
}
type morphContainerWriter struct {
neoClient *cntClient.Client
}

View file

@ -134,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
err := stopper(ctx)
if err != nil {
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.Error(err),
zap.String("error", err.Error()),
)
}

View file

@ -35,16 +35,20 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
lookupScriptHashesInNNS(c) // smart contract auto negotiation
err := c.cfgMorph.client.EnableNotarySupport(
client.WithProxyContract(
c.cfgMorph.proxyScriptHash,
),
if c.cfgMorph.notaryEnabled {
err := c.cfgMorph.client.EnableNotarySupport(
client.WithProxyContract(
c.cfgMorph.proxyScriptHash,
),
)
fatalOnErr(err)
}
c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)
fatalOnErr(err)
c.log.Info(ctx, logs.FrostFSNodeNotarySupport)
wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
fatalOnErr(err)
var netmapSource netmap.Source
@ -96,7 +100,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.Error(err),
zap.String("error", err.Error()),
)
fatalOnErr(err)
@ -112,9 +116,15 @@ func initMorphClient(ctx context.Context, c *cfg) {
}
c.cfgMorph.client = cli
c.cfgMorph.notaryEnabled = cli.ProbeNotary()
}
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
// skip notary deposit in non-notary environments
if !c.cfgMorph.notaryEnabled {
return
}
tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err)
@ -151,7 +161,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
}
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil {
return err
}
@ -168,7 +178,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
@ -223,17 +233,27 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
subs map[event.Type][]event.Handler,
) {
for typ, handlers := range subs {
pi := event.NotificationParserInfo{}
pi.SetType(typ)
pi.SetScriptHash(scHash)
p, ok := parsers[typ]
if !ok {
panic(fmt.Sprintf("missing parser for event %s", typ))
}
lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
Contract: scHash,
Type: typ,
Parser: p,
Handlers: handlers,
})
pi.SetParser(p)
lis.SetNotificationParser(pi)
for _, h := range handlers {
hi := event.NotificationHandlerInfo{}
hi.SetType(typ)
hi.SetScriptHash(scHash)
hi.SetHandler(h)
lis.RegisterNotificationHandler(hi)
}
}
}
@ -262,6 +282,10 @@ func lookupScriptHashesInNNS(c *cfg) {
)
for _, t := range targets {
if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
continue // ignore proxy contract if notary disabled
}
if emptyHash.Equals(*t.h) {
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)

View file

@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
}
}
s.setControlNetmapStatus(ctrlNetSt)
s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
}
// sets the current node state to the given value. Subsequent cfg.bootstrap
@ -193,14 +193,16 @@ func addNewEpochNotificationHandlers(c *cfg) {
}
})
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.Error(err),
)
}
})
if c.cfgMorph.notaryEnabled {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
})
}
}
// bootstrapNode adds current node to the Network map.
@ -423,7 +425,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.
if err != nil {
return err
}
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res)
}
type netInfo struct {

View file

@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@ -58,7 +59,7 @@ func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.Error(err),
zap.String("error", err.Error()),
)
}
@ -136,6 +137,24 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
return result, nil
}
type innerRingFetcherWithoutNotary struct {
nm *nmClient.Client
}
func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
keys, err := f.nm.GetInnerRingList()
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
}
result := make([][]byte, 0, len(keys))
for i := range keys {
result = append(result, keys[i].Bytes())
}
return result, nil
}
func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
@ -215,7 +234,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr)
prm.WithForceRemoval()
return ls.Inhume(ctx, prm)
_, err := ls.Inhume(ctx, prm)
return err
}
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
@ -265,9 +285,10 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr)
if err := ls.Inhume(ctx, inhumePrm); err != nil {
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.Error(err),
zap.String("error", err.Error()),
)
}
}),
@ -284,8 +305,13 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
}
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
if c.cfgMorph.client.ProbeNotary() {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
}
}
return &innerRingFetcherWithoutNotary{
nm: c.cfgNetmap.wrapper,
}
}
@ -474,7 +500,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...)
return e.engine.Inhume(ctx, prm)
_, err := e.engine.Inhume(ctx, prm)
return err
}
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {

View file

@ -113,7 +113,7 @@ func initTreeService(c *cfg) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.Error(err))
zap.String("error", err.Error()))
}
})

View file

@ -2,6 +2,7 @@ package ape
const (
RuleFlag = "rule"
RuleFlagDesc = "Rule statement"
PathFlag = "path"
PathFlagDesc = "Path to encoded chain in JSON or binary format"
TargetNameFlag = "target-name"
@ -16,64 +17,3 @@ const (
ChainNameFlagDesc = "Chain name(ingress|s3)"
AllFlag = "all"
)
const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format:
<status>[:status_detail] <action>... <condition>... <resource>...
Status:
- allow Permits specified actions
- deny Prohibits specified actions
- deny:QuotaLimitReached Denies access due to quota limits
Actions:
Object operations:
- Object.Put, Object.Get, etc.
- Object.* (all object operations)
Container operations:
- Container.Put, Container.Get, etc.
- Container.* (all container operations)
Conditions:
ResourceCondition:
Format: ResourceCondition:"key"=value, "key"!=value
Reserved properties (use '\' before '$'):
- $Object:version
- $Object:objectID
- $Object:containerID
- $Object:ownerID
- $Object:creationEpoch
- $Object:payloadLength
- $Object:payloadHash
- $Object:objectType
- $Object:homomorphicHash
RequestCondition:
Format: RequestCondition:"key"=value, "key"!=value
Reserved properties (use '\' before '$'):
- $Actor:publicKey
- $Actor:role
Example:
ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others
Resources:
For objects:
- namespace/cid/oid (specific object)
- namespace/cid/* (all objects in container)
- namespace/* (all objects in namespace)
- * (all objects)
- /* (all objects in root namespace)
- /cid/* (all objects in root container)
- /cid/oid (specific object in root container)
For containers:
- namespace/cid (specific container)
- namespace/* (all containers in namespace)
- * (all containers)
- /cid (root container)
- /* (all root containers)
Notes:
- Cannot mix object and container operations in one rule
- Default behavior is Any=false unless 'any' is specified
- Use 'all' keyword to explicitly set Any=false`
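
Editorial note: as an illustration of the rule grammar described above, two statements composed only from the documented statuses, actions, conditions, and resource patterns (the container ID segment `cid` and the condition values are placeholders):

```
allow Object.Get Object.Head RequestCondition:"$Actor:role"=others /cid/*
deny:QuotaLimitReached Object.Put ResourceCondition:"check_key"!="check_value" namespace/cid/*
```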

View file

@ -83,9 +83,6 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10
# Container service section
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200

View file

@ -124,11 +124,6 @@
"pool_size": 10,
"put_timeout": "15s"
},
"container": {
"list_stream": {
"batch_size": "500"
}
},
"object": {
"delete": {
"tombstone_lifetime": 10

View file

@ -79,8 +79,7 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
morph:
dial_timeout: 30s # timeout for side chain NEO RPC client connection
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls).
# Negative value disables caching. A zero value sets the default value.
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Default value: block time. It is recommended to have this value less or equal to block time.
# Cached entities: containers, container lists, eACL tables.
container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache.
@ -109,10 +108,6 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum amount of concurrent replications
container:
list_stream:
batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once
object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs

View file

@ -42,6 +42,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
@ -97,6 +98,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
@ -152,6 +154,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
@ -207,6 +210,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",

View file

@ -95,15 +95,19 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release
### Prepare and push images to a Docker registry (automated)
### Prepare and push images to Docker Hub (if not automated)
Create Docker images for all applications and push them into the container registry
(executed automatically in Forgejo Actions upon pushing a release tag):
Create Docker images for all applications and push them to Docker Hub
(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images
$ make push-images
$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
```
### Make a proper release (if not automated)

go.mod
View file

@ -4,11 +4,11 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250129133430-d195cb510401
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
@ -27,7 +27,7 @@ require (
github.com/klauspost/compress v1.17.4
github.com/mailru/easyjson v0.7.7
github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.14.0
github.com/multiformats/go-multiaddr v0.12.1
github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
@ -40,15 +40,15 @@ require (
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.10.0
golang.org/x/sys v0.28.0
golang.org/x/term v0.27.0
google.golang.org/grpc v1.69.2
google.golang.org/protobuf v1.36.1
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/term v0.21.0
google.golang.org/grpc v1.66.2
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
)
@ -119,15 +119,15 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect

go.sum

Binary file not shown.

View file

@ -146,6 +146,7 @@ const (
ClientCantGetBlockchainHeight = "can't get blockchain height"
ClientCantGetBlockchainHeight243 = "can't get blockchain height"
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
EventCouldNotStartListenToEvents = "could not start listen to events"
EventStopEventListenerByError = "stop event listener by error"
EventStopEventListenerByContext = "stop event listener by context"
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
@ -163,9 +164,17 @@ const (
EventNotaryParserNotSet = "notary parser not set"
EventCouldNotParseNotaryEvent = "could not parse notary event"
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
EventIgnoreNilEventParser = "ignore nil event parser"
EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
EventRegisteredNewEventParser = "registered new event parser"
EventIgnoreNilEventHandler = "ignore nil event handler"
EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
EventRegisteredNewEventHandler = "registered new event handler"
EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
EventIgnoreNilBlockHandler = "ignore nil block handler"
StorageOperation = "local object storage operation"
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
BlobovniczaOpeningBoltDB = "opening BoltDB"
@ -383,6 +392,7 @@ const (
FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
FrostFSNodeConfigurationReading = "configuration reading"
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"

View file

@ -12,9 +12,8 @@ type ApplicationInfo struct {
func NewApplicationInfo(version string) *ApplicationInfo {
appInfo := &ApplicationInfo{
versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "app_info",
Help: "General information about the application.",
Name: "app_info",
Help: "General information about the application.",
}, []string{"version"}),
}
appInfo.versionValue.With(prometheus.Labels{"version": version})
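
Editorial note: the hunk above adds a `Namespace` to the gauge options; with client_golang, the namespace is joined into the fully qualified metric name. A small sketch (the `"frostfs"` value is an assumption for illustration; the node takes it from the metrics package's own `namespace` constant):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	opts := prometheus.GaugeOpts{Namespace: "frostfs", Name: "app_info"}

	// Prometheus joins the non-empty parts with "_" to build the final name.
	fmt.Println(prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) // frostfs_app_info
}
```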

View file

@ -31,7 +31,9 @@ type RPCActorProvider interface {
type ProxyVerificationContractStorage struct {
rpcActorProvider RPCActorProvider
cosigners []actor.SignerAccount
acc *wallet.Account
proxyScriptHash util.Uint160
policyScriptHash util.Uint160
}
@ -39,27 +41,12 @@ type ProxyVerificationContractStorage struct {
var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
acc := wallet.NewAccountFromPrivateKey(key)
return &ProxyVerificationContractStorage{
rpcActorProvider: rpcActorProvider,
cosigners: []actor.SignerAccount{
{
Signer: transaction.Signer{
Account: proxyScriptHash,
Scopes: transaction.CustomContracts,
AllowedContracts: []util.Uint160{policyScriptHash},
},
Account: notary.FakeContractAccount(proxyScriptHash),
},
{
Signer: transaction.Signer{
Account: acc.Contract.ScriptHash(),
Scopes: transaction.CalledByEntry,
},
Account: acc,
},
},
acc: wallet.NewAccountFromPrivateKey(key),
proxyScriptHash: proxyScriptHash,
policyScriptHash: policyScriptHash,
}
@ -77,7 +64,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
act, err := actor.New(rpcActor, contractStorage.cosigners)
act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash))
if err != nil {
return nil, err
}
@ -111,16 +98,31 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na
// ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as cosigners.
func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
// contractStorageActor is reconstructed on each method invocation because the RPCActor's (that is, basically, the WSClient's) connection may get invalidated, but
// ProxyVerificationContractStorage does not manage reconnections.
contractStorageActor, err := contractStorage.newContractStorageActor()
if err != nil {
return nil, err
}
return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
}
type invokerAdapter struct {
*invoker.Invoker
rpcInvoker invoker.RPCInvoke
}
func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
return n.rpcInvoker
func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount {
return []actor.SignerAccount{
{
Signer: transaction.Signer{
Account: proxyScriptHash,
Scopes: transaction.CustomContracts,
AllowedContracts: []util.Uint160{policyScriptHash},
},
Account: notary.FakeContractAccount(proxyScriptHash),
},
{
Signer: transaction.Signer{
Account: acc.Contract.ScriptHash(),
Scopes: transaction.CalledByEntry,
},
Account: acc,
},
}
}

View file

@ -67,7 +67,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
if err != nil {
// do not throw error, try best case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
zap.Error(err))
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
@ -84,7 +84,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
// is not possible for previous epoch, so
// do not throw error, try best case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
zap.Error(err))
zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,

View file

@ -8,6 +8,7 @@ type (
// ContractProcessor interface defines functions for binding event producers
// such as event.Listener and Timers with contract processor.
ContractProcessor interface {
ListenerNotificationParsers() []event.NotificationParserInfo
ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo
@ -15,6 +16,11 @@ type (
)
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
// register notification parsers
for _, parser := range p.ListenerNotificationParsers() {
l.SetNotificationParser(parser)
}
// register notification handlers
for _, handler := range p.ListenerNotificationHandlers() {
l.RegisterNotificationHandler(handler)

View file

@ -38,7 +38,10 @@ import (
func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
alphaSync event.Handler,
) error {
locodeValidator := s.newLocodeValidator(cfg)
locodeValidator, err := s.newLocodeValidator(cfg)
if err != nil {
return err
}
netSettings := (*networkSettings)(s.netmapClient)
@ -48,7 +51,6 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
poolSize := cfg.GetInt("workers.netmap")
s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
var err error
s.netmapProcessor, err = netmap.New(&netmap.Params{
Log: s.log,
Metrics: s.irMetrics,
@ -98,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock
@ -378,6 +380,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
// form morph container client's options
morphCnrOpts := make([]container.Option, 0, 3)
morphCnrOpts = append(morphCnrOpts,
container.TryNotary(),
container.AsAlphabet(),
)
@ -387,12 +390,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
}
s.containerClient = result.CnrClient
s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet())
s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
if err != nil {
return nil, err
}
s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet())
s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
if err != nil {
return nil, err
}
@ -454,7 +457,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
morphChain := &chainParams{

View file

@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
if err != nil {
// we don't stop inner ring execution on this error
s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
zap.Error(err))
zap.String("error", err.Error()))
}
s.tickInitialExpoch(ctx)
@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) {
for _, c := range s.closers {
if err := c(); err != nil {
s.log.Warn(ctx, logs.InnerringCloserError,
zap.Error(err),
zap.String("error", err.Error()),
)
}
}

View file

@ -9,7 +9,7 @@ import (
"github.com/spf13/viper"
)
func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
locodeDB := locodebolt.New(locodebolt.Prm{
Path: cfg.GetString("locode.db.path"),
},
@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
return irlocode.New(irlocode.Prm{
DB: (*locodeBoltDBWrapper)(locodeDB),
})
}), nil
}
type locodeBoltEntryWrapper struct {

View file

@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
// there is no signature collecting, so we don't need extra fee
_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return false
}
@ -47,7 +47,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
networkMap, err := ap.netmapClient.NetMap()
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
zap.Error(err))
zap.String("error", err.Error()))
continue
}
@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
zap.Error(err),
zap.String("error", err.Error()),
)
}
}
@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
zap.Error(err),
zap.String("error", err.Error()),
)
}
}
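
Editorial note: many hunks in this diff swap `zap.String("error", err.Error())` for `zap.Error(err)` (or the reverse, depending on the branch). A minimal sketch of the two forms with a plain zap logger:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	err := errors.New("boom")

	// Old style: the error is stringified by hand under an explicit key.
	log.Warn("old style", zap.String("error", err.Error()))

	// New style: zap.Error emits the same "error" key, handles nil errors
	// safely, and lets encoders render wrapped/verbose details when available.
	log.Warn("new style", zap.Error(err))
}
```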

View file

@ -114,6 +114,11 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
ap.pwLock.Unlock()
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
return nil
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil

View file

@ -88,16 +88,32 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
var parsers []event.NotificationParserInfo
// new lock event
lock := event.NotificationParserInfo{}
lock.SetType(lockNotification)
lock.SetScriptHash(bp.balanceSC)
lock.SetParser(balanceEvent.ParseLock)
parsers = append(parsers, lock)
return parsers
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: bp.balanceSC,
Type: lockNotification,
Parser: balanceEvent.ParseLock,
Handlers: []event.Handler{bp.handleLock},
},
}
var handlers []event.NotificationHandlerInfo
// lock handler
lock := event.NotificationHandlerInfo{}
lock.SetType(lockNotification)
lock.SetScriptHash(bp.balanceSC)
lock.SetHandler(bp.handleLock)
handlers = append(handlers, lock)
return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -50,7 +50,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
err := cp.checkPutContainer(pctx)
if err != nil {
cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
zap.Error(err),
zap.String("error", err.Error()),
)
return false
@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
zap.Error(err),
zap.String("error", err.Error()),
)
return false
}
@ -113,7 +113,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
err := cp.checkDeleteContainer(e)
if err != nil {
cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
zap.Error(err),
zap.String("error", err.Error()),
)
return false
@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
zap.Error(err),
zap.String("error", err.Error()),
)
return false

View file

@ -118,6 +118,11 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
return nil
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil

View file

@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
zap.Error(err))
zap.String("error", err.Error()))
return false
}

View file

@ -142,34 +142,70 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
var (
parsers = make([]event.NotificationParserInfo, 0, 6)
p event.NotificationParserInfo
)
p.SetScriptHash(np.frostfsContract)
// deposit event
p.SetType(event.TypeFromString(depositNotification))
p.SetParser(frostfsEvent.ParseDeposit)
parsers = append(parsers, p)
// withdraw event
p.SetType(event.TypeFromString(withdrawNotification))
p.SetParser(frostfsEvent.ParseWithdraw)
parsers = append(parsers, p)
// cheque event
p.SetType(event.TypeFromString(chequeNotification))
p.SetParser(frostfsEvent.ParseCheque)
parsers = append(parsers, p)
// config event
p.SetType(event.TypeFromString(configNotification))
p.SetParser(frostfsEvent.ParseConfig)
parsers = append(parsers, p)
return parsers
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: np.frostfsContract,
Type: event.TypeFromString(depositNotification),
Parser: frostfsEvent.ParseDeposit,
Handlers: []event.Handler{np.handleDeposit},
},
{
Contract: np.frostfsContract,
Type: event.TypeFromString(withdrawNotification),
Parser: frostfsEvent.ParseWithdraw,
Handlers: []event.Handler{np.handleWithdraw},
},
{
Contract: np.frostfsContract,
Type: event.TypeFromString(chequeNotification),
Parser: frostfsEvent.ParseCheque,
Handlers: []event.Handler{np.handleCheque},
},
{
Contract: np.frostfsContract,
Type: event.TypeFromString(configNotification),
Parser: frostfsEvent.ParseConfig,
Handlers: []event.Handler{np.handleConfig},
},
}
var (
handlers = make([]event.NotificationHandlerInfo, 0, 6)
h event.NotificationHandlerInfo
)
h.SetScriptHash(np.frostfsContract)
// deposit handler
h.SetType(event.TypeFromString(depositNotification))
h.SetHandler(np.handleDeposit)
handlers = append(handlers, h)
// withdraw handler
h.SetType(event.TypeFromString(withdrawNotification))
h.SetHandler(np.handleWithdraw)
handlers = append(handlers, h)
// cheque handler
h.SetType(event.TypeFromString(chequeNotification))
h.SetHandler(np.handleCheque)
handlers = append(handlers, h)
// config handler
h.SetType(event.TypeFromString(configNotification))
h.SetHandler(np.handleConfig)
handlers = append(handlers, h)
return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -28,21 +28,21 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
zap.Error(err))
zap.String("error", err.Error()))
}
// 2. Update NeoFSAlphabet role in the sidechain.
@ -98,14 +98,14 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
zap.Error(err))
zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
zap.Error(err))
zap.String("error", err.Error()))
return
}
@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
zap.Error(err))
zap.String("error", err.Error()))
}
}
@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe
err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
zap.Error(err))
zap.String("error", err.Error()))
}
}
@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
zap.Error(err))
zap.String("error", err.Error()))
}
}

View file

@ -155,16 +155,22 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
var pi event.NotificationParserInfo
pi.SetScriptHash(gp.designate)
pi.SetType(event.TypeFromString(native.DesignationEventName))
pi.SetParser(rolemanagement.ParseDesignate)
return []event.NotificationParserInfo{pi}
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: gp.designate,
Type: event.TypeFromString(native.DesignationEventName),
Parser: rolemanagement.ParseDesignate,
Handlers: []event.Handler{gp.HandleAlphabetSync},
},
}
var hi event.NotificationHandlerInfo
hi.SetScriptHash(gp.designate)
hi.SetType(event.TypeFromString(native.DesignationEventName))
hi.SetHandler(gp.HandleAlphabetSync)
return []event.NotificationHandlerInfo{hi}
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
})
if err != nil {
np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
zap.Error(err))
zap.String("error", err.Error()))
return false
}

View file

@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
zap.Error(err))
zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)
}
@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
zap.Error(err))
zap.String("error", err.Error()))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
zap.Error(err))
zap.String("error", err.Error()))
}
// get new netmap snapshot
networkMap, err := np.netmapClient.NetMap()
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
zap.Error(err))
zap.String("error", err.Error()))
return false
}

View file

@ -42,7 +42,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
zap.Error(err),
zap.String("error", err.Error()),
)
return false

View file

@ -161,16 +161,36 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
parsers := make([]event.NotificationParserInfo, 0, 3)
var p event.NotificationParserInfo
p.SetScriptHash(np.netmapClient.ContractAddress())
// new epoch event
p.SetType(newEpochNotification)
p.SetParser(netmapEvent.ParseNewEpoch)
parsers = append(parsers, p)
return parsers
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: np.netmapClient.ContractAddress(),
Type: newEpochNotification,
Parser: netmapEvent.ParseNewEpoch,
Handlers: []event.Handler{np.handleNewEpoch},
},
}
handlers := make([]event.NotificationHandlerInfo, 0, 3)
var i event.NotificationHandlerInfo
i.SetScriptHash(np.netmapClient.ContractAddress())
// new epoch handler
i.SetType(newEpochNotification)
i.SetHandler(np.handleNewEpoch)
handlers = append(handlers, i)
return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -62,7 +62,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool {
func (s *Server) InnerRingIndex(ctx context.Context) int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}
@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
func (s *Server) InnerRingSize(ctx context.Context) int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}
@ -86,7 +86,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
func (s *Server) AlphabetIndex(ctx context.Context) int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}
@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.Error(err))
zap.String("error", err.Error()))
}
})

View file

@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
})
})
if err != nil {
return fmt.Errorf("determine DB size: %w", err)
return fmt.Errorf("can't determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
return saveItemsCount(tx, items)
}); err != nil {
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
return fmt.Errorf("save blobovnicza's size and items count: %w", err)
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
}
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}

View file

@ -6,6 +6,7 @@ import (
"syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -93,6 +94,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
b.itemDeleted(recordSize)
}

Some files were not shown because too many files have changed in this diff.