Compare commits: support/v0...master
98 commits

Commits (SHA1):
ceac1c8709
f7e75b13b0
198aaebc94
85af6bcd5c
8a658de0b2
3900b92927
5ccb3394b4
dc410fca90
cddcd73f04
d7fcc5ce30
c0221d76e6
242f0095d0
6fe34d266a
fa08bfa553
0da998ef50
e44782473a
9cd1bcef06
ca0a33ea0f
f6c5222952
ea868e09f8
31d3d299bf
b5b4f78b49
2832f44437
7c3bcb0f44
e64871c3fd
303cd35a01
bb9ba1bce2
db03742d33
148d68933b
51ee132ea3
226dd25dd0
bd0197eaa8
e44b84c18c
bed49e6ace
df05057ed4
b6c8ebf493
6e82661c35
1a091ea7bb
7ac3542714
f0c43c8d80
8ba9f31fca
2af3409d39
d165ac042c
7151c71d51
91d9dc2676
7853dbc315
3821645085
72470d6b48
e9837bbcf9
a641c91594
b1614a284d
d0ce835fbf
dfa51048a8
670305a721
1f6cf57e30
386a12eea4
15139d80c9
41da27dad5
ac0511d214
7e542906ef
d1bc4351c3
1c12f23b84
a353d45742
d5c46d812a
d5d5ce2074
7df3520d48
5fe78e51d1
84b4051b4d
6a51086030
5c3b2d95ba
2d5d4093be
e3487d5af5
e37dcdf88b
6c679d1535
281d65435e
b348b20289
748edd1999
47dfd8840c
432042c534
9cabca9dfe
60feed3b5f
635a292ae4
edfa3f4825
e0ac3a583f
00c608c05e
bba1892fa1
01acec708f
aac65001e5
1170370753
9e275d44c8
2469e0c683
a6ef4ab524
49959c4166
61ee1b5610
b10c954377
1605391628
b1766e47c7
caa4253249
239 changed files with 2230 additions and 2365 deletions
CODEOWNERS (new file, +3)
@@ -0,0 +1,3 @@
+.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
+.forgejo/.* @potyarkin
+Makefile @potyarkin
Makefile (10 lines changed)
|
@ -270,10 +270,12 @@ env-up: all
|
|||
echo "Frostfs contracts not found"; exit 1; \
|
||||
fi
|
||||
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
|
||||
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
|
||||
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
|
||||
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
|
||||
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
|
||||
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
|
||||
--storage-wallet ./dev/storage/wallet01.json \
|
||||
--storage-wallet ./dev/storage/wallet02.json \
|
||||
--storage-wallet ./dev/storage/wallet03.json \
|
||||
--storage-wallet ./dev/storage/wallet04.json
|
||||
|
||||
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
|
||||
make locode-download; \
|
||||
fi
|
||||
|
|
@@ -135,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
 	if err != nil {
 		return nil, fmt.Errorf("resolve container contract hash: %w", err)
 	}
-	cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
+	cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
 	if err != nil {
 		return nil, fmt.Errorf("create morph container client: %w", err)
 	}
@ -53,16 +53,15 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
|
|||
}
|
||||
|
||||
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
|
||||
|
||||
inv := invoker.New(c, nil)
|
||||
var ch util.Uint160
|
||||
r := management.NewReader(inv)
|
||||
nnsCs, err := helper.GetContractByID(r, 1)
|
||||
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
|
||||
|
||||
ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
|
||||
ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
|
||||
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
|
||||
|
||||
invokerAdapter := &invokerAdapter{
|
||||
|
@ -74,7 +73,7 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
|
|||
}
|
||||
|
||||
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
|
||||
|
||||
ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)
|
||||
|
|
|
@ -51,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
|
|||
nmHash util.Uint160
|
||||
)
|
||||
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ import (
|
|||
const forceConfigSet = "force"
|
||||
|
||||
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create N3 client: %w", err)
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
|
|||
return fmt.Errorf("invalid filename: %w", err)
|
||||
}
|
||||
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create N3 client: %w", err)
|
||||
}
|
||||
|
@ -157,7 +157,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
|
|||
}
|
||||
|
||||
func listContainers(cmd *cobra.Command, _ []string) error {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create N3 client: %w", err)
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ type contractDumpInfo struct {
|
|||
}
|
||||
|
||||
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create N3 client: %w", err)
|
||||
}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package frostfsid
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sort"
|
||||
|
@ -489,10 +488,6 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
|
|||
}
|
||||
f.bw.Reset()
|
||||
|
||||
if len(f.wCtx.SentTxs) == 0 {
|
||||
return nil, errors.New("no transactions to wait")
|
||||
}
|
||||
|
||||
f.wCtx.Command.Println("Waiting for transactions to persist...")
|
||||
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
|
||||
}
|
||||
|
@ -514,7 +509,7 @@ func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, se
|
|||
}
|
||||
|
||||
func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
|
||||
|
||||
inv := invoker.New(c, nil)
|
||||
|
|
|
@ -1,59 +1,12 @@
|
|||
package frostfsid
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFrostfsIDConfig(t *testing.T) {
|
||||
pks := make([]*keys.PrivateKey, 4)
|
||||
for i := range pks {
|
||||
pk, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
pks[i] = pk
|
||||
}
|
||||
|
||||
fmts := []string{
|
||||
pks[0].GetScriptHash().StringLE(),
|
||||
address.Uint160ToString(pks[1].GetScriptHash()),
|
||||
hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
|
||||
hex.EncodeToString(pks[3].PublicKey().Bytes()),
|
||||
}
|
||||
|
||||
for i := range fmts {
|
||||
v := viper.New()
|
||||
v.Set("frostfsid.admin", fmts[i])
|
||||
|
||||
actual, found, err := helper.GetFrostfsIDAdmin(v)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, pks[i].GetScriptHash(), actual)
|
||||
}
|
||||
|
||||
t.Run("bad key", func(t *testing.T) {
|
||||
v := viper.New()
|
||||
v.Set("frostfsid.admin", "abc")
|
||||
|
||||
_, found, err := helper.GetFrostfsIDAdmin(v)
|
||||
require.Error(t, err)
|
||||
require.True(t, found)
|
||||
})
|
||||
t.Run("missing key", func(t *testing.T) {
|
||||
v := viper.New()
|
||||
|
||||
_, found, err := helper.GetFrostfsIDAdmin(v)
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNamespaceRegexp(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
|
@ -141,60 +140,29 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
|
|||
}
|
||||
|
||||
func generateStorageCreds(cmd *cobra.Command, _ []string) error {
|
||||
return refillGas(cmd, storageGasConfigFlag, true)
|
||||
}
|
||||
|
||||
func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
|
||||
// storage wallet path is not part of the config
|
||||
storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
|
||||
// wallet address is not part of the config
|
||||
walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
|
||||
|
||||
var gasReceiver util.Uint160
|
||||
|
||||
if len(walletAddress) != 0 {
|
||||
gasReceiver, err = address.StringToUint160(walletAddress)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
|
||||
}
|
||||
} else {
|
||||
if storageWalletPath == "" {
|
||||
return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
|
||||
}
|
||||
|
||||
var w *wallet.Wallet
|
||||
|
||||
if createWallet {
|
||||
w, err = wallet.NewWallet(storageWalletPath)
|
||||
} else {
|
||||
w, err = wallet.NewWalletFromFile(storageWalletPath)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create wallet: %w", err)
|
||||
}
|
||||
|
||||
if createWallet {
|
||||
var password string
|
||||
|
||||
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
|
||||
password, err := config.GetStoragePassword(viper.GetViper(), label)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't fetch password: %w", err)
|
||||
}
|
||||
|
||||
if label == "" {
|
||||
label = constants.SingleAccountName
|
||||
}
|
||||
|
||||
if err := w.CreateAccount(label, password); err != nil {
|
||||
return fmt.Errorf("can't create account: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
gasReceiver = w.Accounts[0].Contract.ScriptHash()
|
||||
walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
|
||||
w, err := wallet.NewWallet(walletPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create wallet: %w", err)
|
||||
}
|
||||
|
||||
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
|
||||
password, err := config.GetStoragePassword(viper.GetViper(), label)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't fetch password: %w", err)
|
||||
}
|
||||
|
||||
if label == "" {
|
||||
label = constants.SingleAccountName
|
||||
}
|
||||
|
||||
if err := w.CreateAccount(label, password); err != nil {
|
||||
return fmt.Errorf("can't create account: %w", err)
|
||||
}
|
||||
return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
|
||||
}
|
||||
|
||||
func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
|
||||
gasStr := viper.GetString(gasFlag)
|
||||
|
||||
gasAmount, err := helper.ParseGASAmount(gasStr)
|
||||
|
@ -208,9 +176,11 @@ func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error
|
|||
}
|
||||
|
||||
bw := io.NewBufBinWriter()
|
||||
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
|
||||
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
|
||||
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
|
||||
for _, gasReceiver := range gasReceivers {
|
||||
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
|
||||
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
|
||||
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
|
||||
}
|
||||
if bw.Err != nil {
|
||||
return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
|
||||
}
|
||||
|
|
|
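To illustrate the new variadic refillGas signature from the hunk above, a minimal caller-side sketch (not part of the diff; addr1 and addr2 stand for script hashes already parsed with address.StringToUint160):

	// Hedged sketch: fund several receivers with a single refillGas call.
	gasReceivers := []util.Uint160{addr1, addr2}
	return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)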
@ -1,7 +1,12 @@
|
|||
package generate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
@ -33,7 +38,27 @@ var (
|
|||
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
|
||||
storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
|
||||
walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
|
||||
|
||||
var gasReceivers []util.Uint160
|
||||
for _, walletAddress := range walletAddresses {
|
||||
addr, err := address.StringToUint160(walletAddress)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
|
||||
}
|
||||
|
||||
gasReceivers = append(gasReceivers, addr)
|
||||
}
|
||||
for _, storageWalletPath := range storageWalletPaths {
|
||||
w, err := wallet.NewWalletFromFile(storageWalletPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create wallet: %w", err)
|
||||
}
|
||||
|
||||
gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
|
||||
}
|
||||
return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
|
||||
},
|
||||
}
|
||||
GenerateAlphabetCmd = &cobra.Command{
|
||||
|
@ -50,10 +75,10 @@ var (
|
|||
func initRefillGasCmd() {
|
||||
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
|
||||
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
|
||||
RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
|
||||
RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
|
||||
RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
|
||||
RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
|
||||
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
|
||||
RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
|
||||
RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
|
||||
}
|
||||
|
||||
func initGenerateStorageCmd() {
|
||||
|
|
|
@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
|
|||
h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
|
||||
}
|
||||
if method != constants.UpdateMethodName || err == nil && !found {
|
||||
h, found, err = GetFrostfsIDAdmin(viper.GetViper())
|
||||
h, found, err = getFrostfsIDAdmin(viper.GetViper())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
|
||||
const frostfsIDAdminConfigKey = "frostfsid.admin"
|
||||
|
||||
func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
|
||||
func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
|
||||
admin := v.GetString(frostfsIDAdminConfigKey)
|
||||
if admin == "" {
|
||||
return util.Uint160{}, false, nil
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
package helper
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFrostfsIDConfig(t *testing.T) {
|
||||
pks := make([]*keys.PrivateKey, 4)
|
||||
for i := range pks {
|
||||
pk, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
pks[i] = pk
|
||||
}
|
||||
|
||||
fmts := []string{
|
||||
pks[0].GetScriptHash().StringLE(),
|
||||
address.Uint160ToString(pks[1].GetScriptHash()),
|
||||
hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
|
||||
hex.EncodeToString(pks[3].PublicKey().Bytes()),
|
||||
}
|
||||
|
||||
for i := range fmts {
|
||||
v := viper.New()
|
||||
v.Set("frostfsid.admin", fmts[i])
|
||||
|
||||
actual, found, err := getFrostfsIDAdmin(v)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, pks[i].GetScriptHash(), actual)
|
||||
}
|
||||
|
||||
t.Run("bad key", func(t *testing.T) {
|
||||
v := viper.New()
|
||||
v.Set("frostfsid.admin", "abc")
|
||||
|
||||
_, found, err := getFrostfsIDAdmin(v)
|
||||
require.Error(t, err)
|
||||
require.True(t, found)
|
||||
})
|
||||
t.Run("missing key", func(t *testing.T) {
|
||||
v := viper.New()
|
||||
|
||||
_, found, err := getFrostfsIDAdmin(v)
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
})
|
||||
}
|
|
@ -134,12 +134,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex
|
|||
return nil, err
|
||||
}
|
||||
|
||||
accounts, err := createWalletAccounts(wallets)
|
||||
accounts, err := getSingleAccounts(wallets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cliCtx, err := DefaultClientContext(c, committeeAcc)
|
||||
cliCtx, err := defaultClientContext(c, committeeAcc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("client context: %w", err)
|
||||
}
|
||||
|
@ -191,7 +191,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet)
|
|||
}
|
||||
c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
|
||||
} else {
|
||||
c, err = GetN3Client(v)
|
||||
c, err = NewRemoteClient(v)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't create N3 client: %w", err)
|
||||
|
@ -211,7 +211,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
|
|||
return ctrPath, nil
|
||||
}
|
||||
|
||||
func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
|
||||
func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
|
||||
accounts := make([]*wallet.Account, len(wallets))
|
||||
for i, w := range wallets {
|
||||
acc, err := GetWalletAccount(w, constants.SingleAccountName)
|
||||
|
|
|
@ -58,17 +58,59 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
|
|||
return nil, err
|
||||
}
|
||||
|
||||
m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
|
||||
accounts := make([]*wallet.Account, len(wallets))
|
||||
for i := range accounts {
|
||||
accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
go bc.Run()
|
||||
|
||||
accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cmd.Name() != "init" {
|
||||
if err := restoreDump(bc, dumpPath); err != nil {
|
||||
return nil, fmt.Errorf("restore dump: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &LocalClient{
|
||||
bc: bc,
|
||||
dumpPath: dumpPath,
|
||||
accounts: accounts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func restoreDump(bc *core.Blockchain, dumpPath string) error {
|
||||
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open local dump: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
r := io.NewBinReaderFromIO(f)
|
||||
|
||||
var skip uint32
|
||||
if bc.BlockHeight() != 0 {
|
||||
skip = bc.BlockHeight() + 1
|
||||
}
|
||||
|
||||
count := r.ReadU32LE() - skip
|
||||
if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
|
||||
accounts := make([]*wallet.Account, len(wallets))
|
||||
for i := range accounts {
|
||||
acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
accounts[i] = acc
|
||||
}
|
||||
|
||||
indexMap := make(map[string]int)
|
||||
for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
|
||||
for i, pub := range cfg.StandbyCommittee {
|
||||
indexMap[pub] = i
|
||||
}
|
||||
|
||||
|
@ -77,37 +119,12 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
|
|||
pj := accounts[j].PrivateKey().PublicKey().Bytes()
|
||||
return indexMap[string(pi)] < indexMap[string(pj)]
|
||||
})
|
||||
sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
|
||||
sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
|
||||
return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
|
||||
})
|
||||
|
||||
go bc.Run()
|
||||
|
||||
if cmd.Name() != "init" {
|
||||
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't open local dump: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
r := io.NewBinReaderFromIO(f)
|
||||
|
||||
var skip uint32
|
||||
if bc.BlockHeight() != 0 {
|
||||
skip = bc.BlockHeight() + 1
|
||||
}
|
||||
|
||||
count := r.ReadU32LE() - skip
|
||||
if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
|
||||
return nil, fmt.Errorf("can't restore local dump: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &LocalClient{
|
||||
bc: bc,
|
||||
dumpPath: dumpPath,
|
||||
accounts: accounts[:m],
|
||||
}, nil
|
||||
m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
|
||||
return accounts[:m], nil
|
||||
}
|
||||
|
||||
func (l *LocalClient) GetBlockCount() (uint32, error) {
|
||||
|
@ -128,11 +145,6 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
|
|||
return &a, nil
|
||||
}
|
||||
|
||||
func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) {
|
||||
// not used by `morph init` command
|
||||
panic("unexpected call")
|
||||
}
|
||||
|
||||
// InvokeFunction is implemented via `InvokeScript`.
|
||||
func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
|
||||
var err error
|
||||
|
@ -296,13 +308,7 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer)
|
|||
}
|
||||
|
||||
func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
|
||||
// We need to test that transaction was formed correctly to catch as many errors as we can.
|
||||
bs := tx.Bytes()
|
||||
_, err := transaction.NewTransactionFromBytes(bs)
|
||||
if err != nil {
|
||||
return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
|
||||
}
|
||||
|
||||
tx = tx.Copy()
|
||||
l.transactions = append(l.transactions, tx)
|
||||
return tx.Hash(), nil
|
||||
}
|
||||
|
|
|
@ -10,7 +10,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
|
@ -25,15 +24,10 @@ import (
|
|||
// Client represents N3 client interface capable of test-invoking scripts
|
||||
// and sending signed transactions to chain.
|
||||
type Client interface {
|
||||
invoker.RPCInvoke
|
||||
actor.RPCActor
|
||||
|
||||
GetBlockCount() (uint32, error)
|
||||
GetNativeContracts() ([]state.Contract, error)
|
||||
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
|
||||
GetVersion() (*result.Version, error)
|
||||
SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
|
||||
GetCommittee() (keys.PublicKeys, error)
|
||||
CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
|
||||
}
|
||||
|
||||
type HashVUBPair struct {
|
||||
|
@ -48,7 +42,7 @@ type ClientContext struct {
|
|||
SentTxs []HashVUBPair
|
||||
}
|
||||
|
||||
func GetN3Client(v *viper.Viper) (Client, error) {
|
||||
func NewRemoteClient(v *viper.Viper) (Client, error) {
|
||||
// number of opened connections
|
||||
// by neo-go client per one host
|
||||
const (
|
||||
|
@ -88,8 +82,14 @@ func GetN3Client(v *viper.Viper) (Client, error) {
|
|||
return c, nil
|
||||
}
|
||||
|
||||
func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
|
||||
commAct, err := NewActor(c, committeeAcc)
|
||||
func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
|
||||
commAct, err := actor.New(c, []actor.SignerAccount{{
|
||||
Signer: transaction.Signer{
|
||||
Account: committeeAcc.Contract.ScriptHash(),
|
||||
Scopes: transaction.Global,
|
||||
},
|
||||
Account: committeeAcc,
|
||||
}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -15,10 +15,8 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -87,16 +85,6 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
|
|||
return wallets, nil
|
||||
}
|
||||
|
||||
func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
|
||||
return actor.New(c, []actor.SignerAccount{{
|
||||
Signer: transaction.Signer{
|
||||
Account: committeeAcc.Contract.ScriptHash(),
|
||||
Scopes: transaction.Global,
|
||||
},
|
||||
Account: committeeAcc,
|
||||
}})
|
||||
}
|
||||
|
||||
func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
|
||||
rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
|
||||
if err != nil {
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
)
|
||||
|
||||
func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
|
||||
|
||||
inv := invoker.New(c, nil)
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
|
||||
func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
|
||||
v := viper.GetViper()
|
||||
c, err := helper.GetN3Client(v)
|
||||
c, err := helper.NewRemoteClient(v)
|
||||
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
|
||||
|
||||
ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)
|
||||
|
@ -26,7 +26,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
|
|||
}
|
||||
|
||||
func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
|
||||
|
||||
inv := invoker.New(c, nil)
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
|
@ -41,7 +40,8 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
|
|||
}
|
||||
|
||||
accHash := w.GetChangeAddress()
|
||||
if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
|
||||
addr, _ := cmd.Flags().GetString(walletAccountFlag)
|
||||
if addr != "" {
|
||||
accHash, err = address.StringToUint160(addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid address: %s", addr)
|
||||
|
@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
|
|||
return fmt.Errorf("can't find account for %s", accHash)
|
||||
}
|
||||
|
||||
prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
|
||||
prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
|
||||
pass, err := input.ReadPassword(prompt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get password: %v", err)
|
||||
|
@ -73,23 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
till := int64(defaultNotaryDepositLifetime)
|
||||
tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tillStr != "" {
|
||||
till, err = strconv.ParseInt(tillStr, 10, 64)
|
||||
if err != nil || till <= 0 {
|
||||
return errInvalidNotaryDepositLifetime
|
||||
}
|
||||
till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
|
||||
if till <= 0 {
|
||||
return errInvalidNotaryDepositLifetime
|
||||
}
|
||||
|
||||
return transferGas(cmd, acc, accHash, gasAmount, till)
|
||||
}
|
||||
|
||||
func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
|
|||
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
|
||||
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
|
||||
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
|
||||
DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
|
||||
DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
|
|||
}
|
||||
|
||||
func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
|
||||
|
||||
inv := invoker.New(c, nil)
|
||||
|
|
|
@ -20,23 +20,32 @@ const (
|
|||
accountAddressFlag = "account"
|
||||
)
|
||||
|
||||
func parseAddresses(cmd *cobra.Command) []util.Uint160 {
|
||||
var addrs []util.Uint160
|
||||
|
||||
accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
|
||||
for _, acc := range accs {
|
||||
addr, err := address.StringToUint160(acc)
|
||||
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
|
||||
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
func addProxyAccount(cmd *cobra.Command, _ []string) {
|
||||
acc, _ := cmd.Flags().GetString(accountAddressFlag)
|
||||
addr, err := address.StringToUint160(acc)
|
||||
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
|
||||
err = processAccount(cmd, addr, "addAccount")
|
||||
addrs := parseAddresses(cmd)
|
||||
err := processAccount(cmd, addrs, "addAccount")
|
||||
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
|
||||
}
|
||||
|
||||
func removeProxyAccount(cmd *cobra.Command, _ []string) {
|
||||
acc, _ := cmd.Flags().GetString(accountAddressFlag)
|
||||
addr, err := address.StringToUint160(acc)
|
||||
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
|
||||
err = processAccount(cmd, addr, "removeAccount")
|
||||
addrs := parseAddresses(cmd)
|
||||
err := processAccount(cmd, addrs, "removeAccount")
|
||||
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
|
||||
}
|
||||
|
||||
func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
|
||||
func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
|
||||
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't initialize context: %w", err)
|
||||
|
@ -54,7 +63,9 @@ func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error
|
|||
}
|
||||
|
||||
bw := io.NewBufBinWriter()
|
||||
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
|
||||
for _, addr := range addrs {
|
||||
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
|
||||
}
|
||||
|
||||
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
|
||||
return err
|
||||
|
|
|
@ -29,13 +29,15 @@ var (
|
|||
|
||||
func initProxyAddAccount() {
|
||||
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
|
||||
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
|
||||
AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
|
||||
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
|
||||
AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
|
||||
}
|
||||
|
||||
func initProxyRemoveAccount() {
|
||||
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
|
||||
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
|
||||
RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
|
||||
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
|
||||
RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
|
||||
}
|
||||
|
||||
|
|
|
@ -105,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
|
|||
fatalOnErr(errors.New("can't find account in wallet"))
|
||||
}
|
||||
|
||||
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
|
||||
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
|
||||
fatalOnErr(err)
|
||||
|
||||
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
|
||||
|
@ -78,13 +77,31 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
|
|||
// SortedIDList returns sorted list of identifiers of user's containers.
|
||||
func (x ListContainersRes) SortedIDList() []cid.ID {
|
||||
list := x.cliRes.Containers()
|
||||
sort.Slice(list, func(i, j int) bool {
|
||||
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
|
||||
return strings.Compare(lhs, rhs) < 0
|
||||
slices.SortFunc(list, func(lhs, rhs cid.ID) int {
|
||||
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
|
||||
})
|
||||
return list
|
||||
}
|
||||
|
||||
func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
|
||||
cliPrm := &client.PrmContainerListStream{
|
||||
XHeaders: prm.XHeaders,
|
||||
OwnerID: prm.OwnerID,
|
||||
Session: prm.Session,
|
||||
}
|
||||
rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init container list: %w", err)
|
||||
}
|
||||
|
||||
err = rdr.Iterate(processCnr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read container list: %w", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// PutContainerPrm groups parameters of PutContainer operation.
|
||||
type PutContainerPrm struct {
|
||||
Client *client.Client
|
||||
|
|
|
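A minimal usage sketch for the new ListContainersStream helper (assumed caller-side wiring; the Unimplemented fallback mirrors what the CLI change below does for servers that do not support the streaming RPC yet):

	err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
		cmd.Println(id.String())
		return false // keep receiving container IDs
	})
	if err != nil {
		if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
			// Older servers: fall back to the unary ListContainers call.
			res, err := internalclient.ListContainers(cmd.Context(), prm)
			commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
			for _, id := range res.SortedIDList() {
				cmd.Println(id.String())
			}
		} else {
			commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
		}
	}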
@ -6,8 +6,11 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// flags of list command.
|
||||
|
@ -51,44 +54,60 @@ var listContainersCmd = &cobra.Command{
|
|||
|
||||
var prm internalclient.ListContainersPrm
|
||||
prm.SetClient(cli)
|
||||
prm.Account = idUser
|
||||
|
||||
res, err := internalclient.ListContainers(cmd.Context(), prm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
prm.OwnerID = idUser
|
||||
prmGet := internalclient.GetContainerPrm{
|
||||
Client: cli,
|
||||
}
|
||||
var containerIDs []cid.ID
|
||||
|
||||
err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
|
||||
printContainer(cmd, prmGet, id)
|
||||
return false
|
||||
})
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
|
||||
res, err := internalclient.ListContainers(cmd.Context(), prm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
containerIDs = res.SortedIDList()
|
||||
} else {
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
}
|
||||
|
||||
containerIDs := res.SortedIDList()
|
||||
for _, cnrID := range containerIDs {
|
||||
if flagVarListName == "" && !flagVarListPrintAttr {
|
||||
cmd.Println(cnrID.String())
|
||||
continue
|
||||
}
|
||||
|
||||
prmGet.ClientParams.ContainerID = &cnrID
|
||||
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
|
||||
if err != nil {
|
||||
cmd.Printf(" failed to read attributes: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
cnr := res.Container()
|
||||
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
|
||||
continue
|
||||
}
|
||||
cmd.Println(cnrID.String())
|
||||
|
||||
if flagVarListPrintAttr {
|
||||
cnr.IterateUserAttributes(func(key, val string) {
|
||||
cmd.Printf(" %s: %s\n", key, val)
|
||||
})
|
||||
}
|
||||
printContainer(cmd, prmGet, cnrID)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
|
||||
if flagVarListName == "" && !flagVarListPrintAttr {
|
||||
cmd.Println(id.String())
|
||||
return
|
||||
}
|
||||
|
||||
prmGet.ClientParams.ContainerID = &id
|
||||
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
|
||||
if err != nil {
|
||||
cmd.Printf(" failed to read attributes: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
cnr := res.Container()
|
||||
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
|
||||
return
|
||||
}
|
||||
cmd.Println(id.String())
|
||||
|
||||
if flagVarListPrintAttr {
|
||||
cnr.IterateUserAttributes(func(key, val string) {
|
||||
cmd.Printf(" %s: %s\n", key, val)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func initContainerListContainersCmd() {
|
||||
commonflags.Init(listContainersCmd)
|
||||
|
||||
|
|
|
@ -1,56 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const ignoreErrorsFlag = "no-errors"
|
||||
|
||||
var evacuateShardCmd = &cobra.Command{
|
||||
Use: "evacuate",
|
||||
Short: "Evacuate objects from shard",
|
||||
Long: "Evacuate objects from shard to other shards",
|
||||
Run: evacuateShard,
|
||||
Deprecated: "use frostfs-cli control shards evacuation start",
|
||||
}
|
||||
|
||||
func evacuateShard(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
|
||||
req.Body.Shard_ID = getShardIDList(cmd)
|
||||
req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.EvacuateShardResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.EvacuateShard(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
cmd.Println("Shard has successfully been evacuated.")
|
||||
}
|
||||
|
||||
func initControlEvacuateShardCmd() {
|
||||
initControlFlags(evacuateShardCmd)
|
||||
|
||||
flags := evacuateShardCmd.Flags()
|
||||
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
|
||||
flags.Bool(shardAllFlag, false, "Process all shards")
|
||||
flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
|
||||
|
||||
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
|
||||
}
|
|
@ -17,10 +17,11 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
awaitFlag = "await"
|
||||
noProgressFlag = "no-progress"
|
||||
scopeFlag = "scope"
|
||||
repOneOnlyFlag = "rep-one-only"
|
||||
awaitFlag = "await"
|
||||
noProgressFlag = "no-progress"
|
||||
scopeFlag = "scope"
|
||||
repOneOnlyFlag = "rep-one-only"
|
||||
ignoreErrorsFlag = "no-errors"
|
||||
|
||||
containerWorkerCountFlag = "container-worker-count"
|
||||
objectWorkerCountFlag = "object-worker-count"
|
||||
|
|
|
@ -13,7 +13,6 @@ var shardsCmd = &cobra.Command{
|
|||
func initControlShardsCmd() {
|
||||
shardsCmd.AddCommand(listShardsCmd)
|
||||
shardsCmd.AddCommand(setShardModeCmd)
|
||||
shardsCmd.AddCommand(evacuateShardCmd)
|
||||
shardsCmd.AddCommand(evacuationShardCmd)
|
||||
shardsCmd.AddCommand(flushCacheCmd)
|
||||
shardsCmd.AddCommand(doctorCmd)
|
||||
|
@ -23,7 +22,6 @@ func initControlShardsCmd() {
|
|||
|
||||
initControlShardsListCmd()
|
||||
initControlSetShardModeCmd()
|
||||
initControlEvacuateShardCmd()
|
||||
initControlEvacuationShardCmd()
|
||||
initControlFlushCacheCmd()
|
||||
initControlDoctorCmd()
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
|
@ -43,6 +42,8 @@ func initObjectHashCmd() {
|
|||
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
|
||||
|
||||
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
|
||||
_ = objectHashCmd.MarkFlagRequired("range")
|
||||
|
||||
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
|
||||
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
|
||||
}
|
||||
|
@ -66,36 +67,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
|
|||
pk := key.GetOrGenerate(cmd)
|
||||
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||
|
||||
tz := typ == hashTz
|
||||
fullHash := len(ranges) == 0
|
||||
if fullHash {
|
||||
var headPrm internalclient.HeadObjectPrm
|
||||
headPrm.SetClient(cli)
|
||||
Prepare(cmd, &headPrm)
|
||||
headPrm.SetAddress(objAddr)
|
||||
|
||||
// get hash of full payload through HEAD (may be user can do it through dedicated command?)
|
||||
res, err := internalclient.HeadObject(cmd.Context(), headPrm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
var cs checksum.Checksum
|
||||
var csSet bool
|
||||
|
||||
if tz {
|
||||
cs, csSet = res.Header().PayloadHomomorphicHash()
|
||||
} else {
|
||||
cs, csSet = res.Header().PayloadChecksum()
|
||||
}
|
||||
|
||||
if csSet {
|
||||
cmd.Println(hex.EncodeToString(cs.Value()))
|
||||
} else {
|
||||
cmd.Println("Missing checksum in object header.")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var hashPrm internalclient.HashPayloadRangesPrm
|
||||
hashPrm.SetClient(cli)
|
||||
Prepare(cmd, &hashPrm)
|
||||
|
@ -104,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
|
|||
hashPrm.SetSalt(salt)
|
||||
hashPrm.SetRanges(ranges)
|
||||
|
||||
if tz {
|
||||
if typ == hashTz {
|
||||
hashPrm.TZ()
|
||||
}
|
||||
|
||||
|
|
|
@ -1,15 +1,12 @@
|
|||
package object
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||
|
@ -507,7 +504,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
|
|||
}
|
||||
|
||||
func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
|
||||
normilizeObjectNodesResult(objects, result)
|
||||
if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
|
||||
printObjectNodesAsJSON(cmd, objID, objects, result)
|
||||
} else {
|
||||
|
@ -515,34 +511,6 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
|
|||
}
|
||||
}
|
||||
|
||||
func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
|
||||
slices.SortFunc(objects, func(lhs, rhs phyObject) int {
|
||||
if lhs.ecHeader == nil && rhs.ecHeader == nil {
|
||||
return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
|
||||
}
|
||||
if lhs.ecHeader == nil {
|
||||
return -1
|
||||
}
|
||||
if rhs.ecHeader == nil {
|
||||
return 1
|
||||
}
|
||||
if lhs.ecHeader.parent == rhs.ecHeader.parent {
|
||||
return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
|
||||
}
|
||||
return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
|
||||
})
|
||||
for _, obj := range objects {
|
||||
op := result.placements[obj.objectID]
|
||||
slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
|
||||
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
|
||||
})
|
||||
slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
|
||||
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
|
||||
})
|
||||
result.placements[obj.objectID] = op
|
||||
}
|
||||
}
|
||||
|
||||
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
|
||||
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
|
|||
log.Info(ctx, c.name+" config updated")
|
||||
if err := c.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
} else {
|
||||
c.init(ctx)
|
||||
|
|
|
@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
|
|||
innerRing.Stop(ctx)
|
||||
if err := metricsCmp.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
if err := pprofCmp.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
|
|||
log.Info(ctx, c.name+" config updated")
|
||||
if err := c.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@ func initAPEManagerService(c *cfg) {
|
|||
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
|
||||
|
||||
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
|
||||
c.cfgMorph.client,
|
||||
apemanager.WithLogger(c.log))
|
||||
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
|
||||
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)
|
||||
|
|
|
@ -591,8 +591,6 @@ type cfgMorph struct {
|
|||
|
||||
client *client.Client
|
||||
|
||||
notaryEnabled bool
|
||||
|
||||
// TTL of Sidechain cached values. Non-positive value disables caching.
|
||||
cacheTTL time.Duration
|
||||
|
||||
|
@ -608,9 +606,10 @@ type cfgAccounting struct {
|
|||
type cfgContainer struct {
|
||||
scriptHash neogoutil.Uint160
|
||||
|
||||
parsers map[event.Type]event.NotificationParser
|
||||
subscribers map[event.Type][]event.Handler
|
||||
workerPool util.WorkerPool // pool for asynchronous handlers
|
||||
parsers map[event.Type]event.NotificationParser
|
||||
subscribers map[event.Type][]event.Handler
|
||||
workerPool util.WorkerPool // pool for asynchronous handlers
|
||||
containerBatchSize uint32
|
||||
}
|
||||
|
||||
type cfgFrostfsID struct {
|
||||
|
@ -1121,7 +1120,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
|
|||
err := ls.Close(context.WithoutCancel(ctx))
|
||||
if err != nil {
|
||||
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
} else {
|
||||
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
|
||||
|
@ -1211,7 +1210,7 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
|
|||
if err != nil {
|
||||
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
|
||||
zap.Uint64("epoch", epoch),
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1221,9 +1220,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
|
|||
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
|
||||
// with the binary-encoded information from the current node's configuration.
|
||||
// The state is set using the provided setter which MUST NOT be nil.
|
||||
func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
|
||||
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
|
||||
ni := c.cfgNodeInfo.localInfo
|
||||
stateSetter(&ni)
|
||||
ni.SetStatus(state)
|
||||
|
||||
prm := nmClient.AddPeerPrm{}
|
||||
prm.SetNodeInfo(ni)
|
||||
|
@ -1233,9 +1232,7 @@ func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.N
|
|||
|
||||
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
|
||||
func bootstrapOnline(ctx context.Context, c *cfg) error {
|
||||
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
|
||||
ni.SetStatus(netmap.Online)
|
||||
})
|
||||
return c.bootstrapWithState(ctx, netmap.Online)
|
||||
}
|
||||
|
||||
// bootstrap calls bootstrapWithState with:
|
||||
|
@ -1246,9 +1243,7 @@ func (c *cfg) bootstrap(ctx context.Context) error {
|
|||
st := c.cfgNetmap.state.controlNetmapStatus()
|
||||
if st == control.NetmapStatus_MAINTENANCE {
|
||||
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
|
||||
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
|
||||
ni.SetStatus(netmap.Maintenance)
|
||||
})
|
||||
return c.bootstrapWithState(ctx, netmap.Maintenance)
|
||||
}
|
||||
|
||||
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
|
||||
|
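Under the new signature, call sites pass the target netmap state directly instead of a setter closure; a brief sketch of the two bootstrap call sites as they look after this change (two separate functions, shown together for comparison):

	// Regular bootstrap announces the node as online.
	return c.bootstrapWithState(ctx, netmap.Online)

	// Maintenance bootstrap is used when the control status is MAINTENANCE.
	return c.bootstrapWithState(ctx, netmap.Maintenance)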
@ -1466,7 +1461,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
|
|||
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
|
||||
return container.NewInfoProvider(func() (container.Source, error) {
|
||||
c.initMorphComponents(ctx)
|
||||
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
|
||||
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
cmd/frostfs-node/config/container/container.go (new file, +27)
|
@@ -0,0 +1,27 @@
+package containerconfig
+
+import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+
+const (
+	subsection           = "container"
+	listStreamSubsection = "list_stream"
+
+	// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
+	ContainerBatchSizeDefault = 1000
+)
+
+// ContainerBatchSize returns the value of "batch_size" config parameter
+// from "list_stream" subsection of "container" section.
+//
+// Returns ContainerBatchSizeDefault if the value is missing or if
+// the value is not positive integer.
+func ContainerBatchSize(c *config.Config) uint32 {
+	if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
+		return ContainerBatchSizeDefault
+	}
+	size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
+	if size == 0 {
+		return ContainerBatchSizeDefault
+	}
+	return size
+}
|
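A hedged sketch of how this option could be consumed during node initialization; the exact wiring is not shown in these hunks, so the appCfg accessor below is an assumption, while the containerBatchSize field comes from the cfgContainer struct change above:

	// Assumed wiring (illustrative): read container.list_stream.batch_size,
	// defaulting to ContainerBatchSizeDefault (1000), into the node config.
	c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)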
cmd/frostfs-node/config/container/container_test.go (new file, +27)
|
@@ -0,0 +1,27 @@
+package containerconfig_test
+
+import (
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
+	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+	"github.com/stretchr/testify/require"
+)
+
+func TestContainerSection(t *testing.T) {
+	t.Run("defaults", func(t *testing.T) {
+		empty := configtest.EmptyConfig()
+		require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
+	})
+
+	const path = "../../../../config/example/node"
+	fileConfigTest := func(c *config.Config) {
+		require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
+	}
+
+	configtest.ForEachFileType(path, fileConfigTest)
+	t.Run("ENV", func(t *testing.T) {
+		configtest.ForEnvFileType(t, path, fileConfigTest)
+	})
+}
|
@ -41,6 +41,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
c.Sub(si),
)

if sc.Mode() == mode.Disabled {
continue
}

// Path for the blobstor can't be present in the default section, because different shards
// must have different paths, so if it is missing, the shard is not here.
// At the same time checking for "blobstor" section doesn't work proper

@ -50,10 +54,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
}
(*config.Config)(sc).SetDefault(def)

if sc.Mode() == mode.Disabled {
continue
}

if err := f(sc); err != nil {
return err
}
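The net effect of these two hunks is that disabled shards are now skipped before defaults are merged into the shard config. A rough sketch of the resulting control flow, assuming the surrounding IterateShards loop shown above (the iteration helper names here are illustrative, not the exact source):

for _, si := range shardIndexes { // illustrative iteration over shard subsections
    sc := shardconfig.From(c.Sub(si)) // assumed constructor, matching the call split across the context lines above
    if sc.Mode() == mode.Disabled {
        continue // skip before touching defaults or required paths
    }
    (*config.Config)(sc).SetDefault(def)
    if err := f(sc); err != nil {
        return err
    }
}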
@ -18,6 +18,22 @@ import (
"github.com/stretchr/testify/require"
)

func TestIterateShards(t *testing.T) {
fileConfigTest := func(c *config.Config) {
var res []string
require.NoError(t,
engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
res = append(res, sc.Metabase().Path())
return nil
}))
require.Equal(t, []string{"abc", "xyz"}, res)
}

const cfgDir = "./testdata/shards"
configtest.ForEachFileType(cfgDir, fileConfigTest)
configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
}

func TestEngineSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
3 cmd/frostfs-node/config/engine/testdata/shards.env vendored Normal file

@ -0,0 +1,3 @@
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
FROSTFS_STORAGE_SHARD_1_MODE=disabled
FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz
13 cmd/frostfs-node/config/engine/testdata/shards.json vendored Normal file

@ -0,0 +1,13 @@
{
  "storage.shard": {
    "0": {
      "metabase.path": "abc"
    },
    "1": {
      "mode": "disabled"
    },
    "2": {
      "metabase.path": "xyz"
    }
  }
}
7 cmd/frostfs-node/config/engine/testdata/shards.yaml vendored Normal file

@ -0,0 +1,7 @@
storage.shard:
  0:
    metabase.path: abc
  1:
    mode: disabled
  2:
    metabase.path: xyz
@ -5,6 +5,7 @@ import (
"context"
"net"

containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"

@ -28,7 +29,7 @@ import (
func initContainerService(_ context.Context, c *cfg) {
// container wrapper that tries to invoke notary
// requests if chain is configured so
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
fatalOnErr(err)

c.shared.cnrClient = wrap

@ -47,6 +48,7 @@ func initContainerService(_ context.Context, c *cfg) {
}

c.shared.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)

defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),

@ -56,7 +58,9 @@ func initContainerService(_ context.Context, c *cfg) {
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
containerService.NewSplitterService(
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
),
)
service = containerService.NewAuditService(service, c.log, c.audit)

@ -218,6 +222,7 @@ type morphContainerReader struct {
lister interface {
ContainersOf(*user.ID) ([]cid.ID, error)
IterateContainersOf(*user.ID, func(cid.ID) error) error
}
}

@ -233,6 +238,10 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(id)
}

func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error {
return x.lister.IterateContainersOf(id, processCID)
}

type morphContainerWriter struct {
neoClient *cntClient.Client
}
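For readers unfamiliar with the new wiring: the APE server now delegates to a splitter that chops the container-listing response into batches of containerBatchSize before handing each batch to the execution service. A purely illustrative sketch of that batching idea (not the actual SplitterService implementation):

// Illustrative only: splits a slice of container IDs into batches of at most batchSize.
func splitIntoBatches(ids []cid.ID, batchSize uint32) [][]cid.ID {
    var batches [][]cid.ID
    for len(ids) > 0 {
        n := int(batchSize)
        if n > len(ids) {
            n = len(ids)
        }
        batches = append(batches, ids[:n])
        ids = ids[n:]
    }
    return batches
}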
@ -134,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
err := stopper(ctx)
if err != nil {
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.String("error", err.Error()),
zap.Error(err),
)
}
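This is the first of many hunks in this comparison that swap zap.String("error", err.Error()) for zap.Error(err). Both emit a field with the key "error", but zap.Error attaches the error as a typed field and lets encoders capture additional detail (wrapped errors, verbose formatting) instead of a pre-flattened string. A minimal sketch of the difference, assuming the standard errors and go.uber.org/zap imports:

// Sketch only: shows the two logging styles side by side.
func logBothStyles() {
    logger, _ := zap.NewDevelopment()
    err := errors.New("boom")
    logger.Debug("old style", zap.String("error", err.Error())) // error flattened to a plain string
    logger.Debug("new style", zap.Error(err))                   // typed error field under the same "error" key
}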
@ -35,20 +35,16 @@ func (c *cfg) initMorphComponents(ctx context.Context) {

lookupScriptHashesInNNS(c) // smart contract auto negotiation

if c.cfgMorph.notaryEnabled {
err := c.cfgMorph.client.EnableNotarySupport(
client.WithProxyContract(
c.cfgMorph.proxyScriptHash,
),
)
fatalOnErr(err)
}

c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
err := c.cfgMorph.client.EnableNotarySupport(
client.WithProxyContract(
c.cfgMorph.proxyScriptHash,
),
)
fatalOnErr(err)

wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
c.log.Info(ctx, logs.FrostFSNodeNotarySupport)

wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
fatalOnErr(err)

var netmapSource netmap.Source

@ -100,7 +96,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.String("error", err.Error()),
zap.Error(err),
)

fatalOnErr(err)

@ -116,15 +112,9 @@ func initMorphClient(ctx context.Context, c *cfg) {
}

c.cfgMorph.client = cli
c.cfgMorph.notaryEnabled = cli.ProbeNotary()
}

func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
// skip notary deposit in non-notary environments
if !c.cfgMorph.notaryEnabled {
return
}

tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err)

@ -161,7 +151,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
}

func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil {
if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
return err
}

@ -178,7 +168,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
}

subs, err = subscriber.New(ctx, &subscriber.Params{

@ -233,27 +223,17 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
subs map[event.Type][]event.Handler,
) {
for typ, handlers := range subs {
pi := event.NotificationParserInfo{}
pi.SetType(typ)
pi.SetScriptHash(scHash)

p, ok := parsers[typ]
if !ok {
panic(fmt.Sprintf("missing parser for event %s", typ))
}

pi.SetParser(p)

lis.SetNotificationParser(pi)

for _, h := range handlers {
hi := event.NotificationHandlerInfo{}
hi.SetType(typ)
hi.SetScriptHash(scHash)
hi.SetHandler(h)

lis.RegisterNotificationHandler(hi)
}
lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
Contract: scHash,
Type: typ,
Parser: p,
Handlers: handlers,
})
}
}

@ -282,10 +262,6 @@ func lookupScriptHashesInNNS(c *cfg) {
)

for _, t := range targets {
if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
continue // ignore proxy contract if notary disabled
}

if emptyHash.Equals(*t.h) {
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
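The same struct-literal registration style recurs across the inner-ring processors later in this comparison. A hedged sketch of registering a single handler under the new API, using the field names shown in the hunk above (the contract hash, event name, parser, and handler here are placeholders, not identifiers from this repository):

lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
    Contract: contractHash,                         // util.Uint160 of the emitting contract (placeholder)
    Type:     event.TypeFromString("exampleEvent"), // placeholder event name
    Parser:   parseExampleEvent,                    // placeholder parser for that event
    Handlers: []event.Handler{handleExample},       // placeholder handler(s)
})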
@ -193,16 +193,14 @@ func addNewEpochNotificationHandlers(c *cfg) {
}
})

if c.cfgMorph.notaryEnabled {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
})
}
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.Error(err),
)
}
})
}

// bootstrapNode adds current node to the Network map.

@ -425,7 +423,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.
if err != nil {
return err
}
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res)
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
}

type netInfo struct {
@ -13,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"

@ -59,7 +58,7 @@ func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.String("error", err.Error()),
zap.Error(err),
)
}

@ -137,24 +136,6 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
return result, nil
}

type innerRingFetcherWithoutNotary struct {
nm *nmClient.Client
}

func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
keys, err := f.nm.GetInnerRingList()
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
}

result := make([][]byte, 0, len(keys))
for i := range keys {
result = append(result, keys[i].Bytes())
}

return result, nil
}

func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)

@ -288,7 +269,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.String("error", err.Error()),
zap.Error(err),
)
}
}),

@ -305,13 +286,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
}

func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
if c.cfgMorph.client.ProbeNotary() {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
}
}
return &innerRingFetcherWithoutNotary{
nm: c.cfgNetmap.wrapper,
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
}
}
@ -113,7 +113,7 @@ func initTreeService(c *cfg) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.String("error", err.Error()))
zap.Error(err))
}
})
@ -83,6 +83,9 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10

# Container service section
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500

# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200

@ -124,6 +124,11 @@
"pool_size": 10,
"put_timeout": "15s"
},
"container": {
"list_stream": {
"batch_size": "500"
}
},
"object": {
"delete": {
"tombstone_lifetime": 10

@ -108,6 +108,10 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum amount of concurrent replications

container:
list_stream:
batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once

object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
@ -42,7 +42,6 @@
|
|||
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
|
||||
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
|
||||
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
|
||||
"FROSTFS_NODE_WALLET_PASSWORD":"",
|
||||
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
|
||||
|
@ -98,7 +97,6 @@
|
|||
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
|
||||
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
|
||||
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
|
||||
"FROSTFS_NODE_WALLET_PASSWORD":"",
|
||||
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
|
||||
|
@ -154,7 +152,6 @@
|
|||
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
|
||||
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
|
||||
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
|
||||
"FROSTFS_NODE_WALLET_PASSWORD":"",
|
||||
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
|
||||
|
@ -210,7 +207,6 @@
|
|||
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
|
||||
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
|
||||
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
|
||||
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
|
||||
"FROSTFS_NODE_WALLET_PASSWORD":"",
|
||||
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",
|
||||
|
|
34 go.mod

@ -4,11 +4,11 @@ go 1.22

require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88

@ -27,7 +27,7 @@ require (
github.com/klauspost/compress v1.17.4
github.com/mailru/easyjson v0.7.7
github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.12.1
github.com/multiformats/go-multiaddr v0.14.0
github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0

@ -40,15 +40,15 @@ require (
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/term v0.21.0
google.golang.org/grpc v1.66.2
google.golang.org/protobuf v1.34.2
golang.org/x/sync v0.10.0
golang.org/x/sys v0.28.0
golang.org/x/term v0.27.0
google.golang.org/grpc v1.69.2
google.golang.org/protobuf v1.36.1
gopkg.in/yaml.v3 v3.0.1
)

@ -119,15 +119,15 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect

BIN go.sum Binary file not shown.
@ -164,17 +164,9 @@ const (
EventNotaryParserNotSet = "notary parser not set"
EventCouldNotParseNotaryEvent = "could not parse notary event"
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
EventIgnoreNilEventParser = "ignore nil event parser"
EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
EventRegisteredNewEventParser = "registered new event parser"
EventIgnoreNilEventHandler = "ignore nil event handler"
EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
EventRegisteredNewEventHandler = "registered new event handler"
EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
EventIgnoreNilBlockHandler = "ignore nil block handler"
StorageOperation = "local object storage operation"
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
BlobovniczaOpeningBoltDB = "opening BoltDB"
@ -12,8 +12,9 @@ type ApplicationInfo struct {
func NewApplicationInfo(version string) *ApplicationInfo {
appInfo := &ApplicationInfo{
versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
Name: "app_info",
Help: "General information about the application.",
Namespace: namespace,
Name:      "app_info",
Help:      "General information about the application.",
}, []string{"version"}),
}
appInfo.versionValue.With(prometheus.Labels{"version": version})
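For context, a hedged sketch of how an app_info-style gauge is commonly exposed with prometheus/client_golang; the namespace and version values here are illustrative, not taken from the project's metrics package:

// Illustrative only: a namespaced gauge vector carrying the version as a label.
// Assumes import "github.com/prometheus/client_golang/prometheus".
appInfo := prometheus.NewGaugeVec(prometheus.GaugeOpts{
    Namespace: "frostfs_node", // assumed namespace value
    Name:      "app_info",
    Help:      "General information about the application.",
}, []string{"version"})
prometheus.MustRegister(appInfo)
appInfo.With(prometheus.Labels{"version": "v0.44.0"}).Set(1) // hypothetical version string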
@ -31,9 +31,7 @@ type RPCActorProvider interface {
|
|||
type ProxyVerificationContractStorage struct {
|
||||
rpcActorProvider RPCActorProvider
|
||||
|
||||
acc *wallet.Account
|
||||
|
||||
proxyScriptHash util.Uint160
|
||||
cosigners []actor.SignerAccount
|
||||
|
||||
policyScriptHash util.Uint160
|
||||
}
|
||||
|
@ -41,12 +39,27 @@ type ProxyVerificationContractStorage struct {
|
|||
var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
|
||||
|
||||
func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
|
||||
acc := wallet.NewAccountFromPrivateKey(key)
|
||||
return &ProxyVerificationContractStorage{
|
||||
rpcActorProvider: rpcActorProvider,
|
||||
|
||||
acc: wallet.NewAccountFromPrivateKey(key),
|
||||
|
||||
proxyScriptHash: proxyScriptHash,
|
||||
cosigners: []actor.SignerAccount{
|
||||
{
|
||||
Signer: transaction.Signer{
|
||||
Account: proxyScriptHash,
|
||||
Scopes: transaction.CustomContracts,
|
||||
AllowedContracts: []util.Uint160{policyScriptHash},
|
||||
},
|
||||
Account: notary.FakeContractAccount(proxyScriptHash),
|
||||
},
|
||||
{
|
||||
Signer: transaction.Signer{
|
||||
Account: acc.Contract.ScriptHash(),
|
||||
Scopes: transaction.CalledByEntry,
|
||||
},
|
||||
Account: acc,
|
||||
},
|
||||
},
|
||||
|
||||
policyScriptHash: policyScriptHash,
|
||||
}
|
||||
|
@ -64,7 +77,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
|
|||
|
||||
func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
|
||||
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
|
||||
act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash))
|
||||
act, err := actor.New(rpcActor, contractStorage.cosigners)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -98,31 +111,16 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na
|
|||
|
||||
// ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as consigners.
|
||||
func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
|
||||
// contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but
|
||||
// ProxyVerificationContractStorage does not manage reconnections.
|
||||
contractStorageActor, err := contractStorage.newContractStorageActor()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
|
||||
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
|
||||
inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
|
||||
return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
|
||||
}
|
||||
|
||||
func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount {
|
||||
return []actor.SignerAccount{
|
||||
{
|
||||
Signer: transaction.Signer{
|
||||
Account: proxyScriptHash,
|
||||
Scopes: transaction.CustomContracts,
|
||||
AllowedContracts: []util.Uint160{policyScriptHash},
|
||||
},
|
||||
Account: notary.FakeContractAccount(proxyScriptHash),
|
||||
},
|
||||
{
|
||||
Signer: transaction.Signer{
|
||||
Account: acc.Contract.ScriptHash(),
|
||||
Scopes: transaction.CalledByEntry,
|
||||
},
|
||||
Account: acc,
|
||||
},
|
||||
}
|
||||
type invokerAdapter struct {
|
||||
*invoker.Invoker
|
||||
rpcInvoker invoker.RPCInvoke
|
||||
}
|
||||
|
||||
func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
|
||||
return n.rpcInvoker
|
||||
}
|
||||
|
|
|
@ -67,7 +67,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
|
|||
if err != nil {
|
||||
// do not throw error, try best case matching
|
||||
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
} else if isInnerRingNode {
|
||||
return &ClassifyResult{
|
||||
Role: acl.RoleInnerRing,
|
||||
|
@ -84,7 +84,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
|
|||
// is not possible for previous epoch, so
|
||||
// do not throw error, try best case matching
|
||||
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
} else if isContainerNode {
|
||||
return &ClassifyResult{
|
||||
Role: acl.RoleContainer,
|
||||
|
|
|
@ -8,7 +8,6 @@ type (
|
|||
// ContractProcessor interface defines functions for binding event producers
|
||||
// such as event.Listener and Timers with contract processor.
|
||||
ContractProcessor interface {
|
||||
ListenerNotificationParsers() []event.NotificationParserInfo
|
||||
ListenerNotificationHandlers() []event.NotificationHandlerInfo
|
||||
ListenerNotaryParsers() []event.NotaryParserInfo
|
||||
ListenerNotaryHandlers() []event.NotaryHandlerInfo
|
||||
|
@ -16,11 +15,6 @@ type (
|
|||
)
|
||||
|
||||
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
|
||||
// register notification parsers
|
||||
for _, parser := range p.ListenerNotificationParsers() {
|
||||
l.SetNotificationParser(parser)
|
||||
}
|
||||
|
||||
// register notification handlers
|
||||
for _, handler := range p.ListenerNotificationHandlers() {
|
||||
l.RegisterNotificationHandler(handler)
|
||||
|
|
|
@ -100,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
|
|||
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
|
||||
if err != nil {
|
||||
fromMainChainBlock = 0
|
||||
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
|
||||
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
|
||||
}
|
||||
mainnetChain.from = fromMainChainBlock
|
||||
|
||||
|
@ -380,7 +380,6 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
|
|||
// form morph container client's options
|
||||
morphCnrOpts := make([]container.Option, 0, 3)
|
||||
morphCnrOpts = append(morphCnrOpts,
|
||||
container.TryNotary(),
|
||||
container.AsAlphabet(),
|
||||
)
|
||||
|
||||
|
@ -390,12 +389,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
|
|||
}
|
||||
s.containerClient = result.CnrClient
|
||||
|
||||
s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
|
||||
s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
|
||||
s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -457,7 +456,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
|
|||
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
|
||||
if err != nil {
|
||||
fromSideChainBlock = 0
|
||||
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
|
||||
}
|
||||
|
||||
morphChain := &chainParams{
|
||||
|
|
|
@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
|
|||
if err != nil {
|
||||
// we don't stop inner ring execution on this error
|
||||
s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
s.tickInitialExpoch(ctx)
|
||||
|
@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) {
|
|||
for _, c := range s.closers {
|
||||
if err := c(); err != nil {
|
||||
s.log.Warn(ctx, logs.InnerringCloserError,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
|
|||
// there is no signature collecting, so we don't need extra fee
|
||||
_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
|
||||
if err != nil {
|
||||
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
|
||||
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
|
||||
|
||||
return false
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
|
|||
networkMap, err := ap.netmapClient.NetMap()
|
||||
if err != nil {
|
||||
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
|
||||
return false
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
|
|||
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
|
||||
if err != nil {
|
||||
ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
|
||||
continue
|
||||
}
|
||||
|
@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
|
|||
ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
|
||||
zap.String("receiver", key.Address()),
|
||||
zap.Int64("amount", int64(gasPerNode)),
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
|
|||
ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
|
||||
zap.Strings("receivers", receiversLog),
|
||||
zap.Int64("amount", int64(gasPerNode)),
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -114,11 +114,6 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
|
|||
ap.pwLock.Unlock()
|
||||
}
|
||||
|
||||
// ListenerNotificationParsers for the 'event.Listener' event producer.
|
||||
func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListenerNotificationHandlers for the 'event.Listener' event producer.
|
||||
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
|
||||
return nil
|
||||
|
|
|
@ -88,32 +88,16 @@ func New(p *Params) (*Processor, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// ListenerNotificationParsers for the 'event.Listener' event producer.
|
||||
func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
|
||||
var parsers []event.NotificationParserInfo
|
||||
|
||||
// new lock event
|
||||
lock := event.NotificationParserInfo{}
|
||||
lock.SetType(lockNotification)
|
||||
lock.SetScriptHash(bp.balanceSC)
|
||||
lock.SetParser(balanceEvent.ParseLock)
|
||||
parsers = append(parsers, lock)
|
||||
|
||||
return parsers
|
||||
}
|
||||
|
||||
// ListenerNotificationHandlers for the 'event.Listener' event producer.
|
||||
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
|
||||
var handlers []event.NotificationHandlerInfo
|
||||
|
||||
// lock handler
|
||||
lock := event.NotificationHandlerInfo{}
|
||||
lock.SetType(lockNotification)
|
||||
lock.SetScriptHash(bp.balanceSC)
|
||||
lock.SetHandler(bp.handleLock)
|
||||
handlers = append(handlers, lock)
|
||||
|
||||
return handlers
|
||||
return []event.NotificationHandlerInfo{
|
||||
{
|
||||
Contract: bp.balanceSC,
|
||||
Type: lockNotification,
|
||||
Parser: balanceEvent.ParseLock,
|
||||
Handlers: []event.Handler{bp.handleLock},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ListenerNotaryParsers for the 'event.Listener' event producer.
|
||||
|
|
|
@ -50,7 +50,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
|
|||
err := cp.checkPutContainer(pctx)
|
||||
if err != nil {
|
||||
cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
return false
|
||||
|
@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
|
|||
|
||||
if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
|
||||
cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
|
|||
err := cp.checkDeleteContainer(e)
|
||||
if err != nil {
|
||||
cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
return false
|
||||
|
@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
|
|||
|
||||
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
|
||||
cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
return false
|
||||
|
|
|
@ -118,11 +118,6 @@ func New(p *Params) (*Processor, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// ListenerNotificationParsers for the 'event.Listener' event producer.
|
||||
func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListenerNotificationHandlers for the 'event.Listener' event producer.
|
||||
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
|
||||
return nil
|
||||
|
|
|
@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
|
|||
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
|
||||
if err != nil {
|
||||
np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -142,70 +142,34 @@ func New(p *Params) (*Processor, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// ListenerNotificationParsers for the 'event.Listener' event producer.
|
||||
func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
|
||||
var (
|
||||
parsers = make([]event.NotificationParserInfo, 0, 6)
|
||||
|
||||
p event.NotificationParserInfo
|
||||
)
|
||||
|
||||
p.SetScriptHash(np.frostfsContract)
|
||||
|
||||
// deposit event
|
||||
p.SetType(event.TypeFromString(depositNotification))
|
||||
p.SetParser(frostfsEvent.ParseDeposit)
|
||||
parsers = append(parsers, p)
|
||||
|
||||
// withdraw event
|
||||
p.SetType(event.TypeFromString(withdrawNotification))
|
||||
p.SetParser(frostfsEvent.ParseWithdraw)
|
||||
parsers = append(parsers, p)
|
||||
|
||||
// cheque event
|
||||
p.SetType(event.TypeFromString(chequeNotification))
|
||||
p.SetParser(frostfsEvent.ParseCheque)
|
||||
parsers = append(parsers, p)
|
||||
|
||||
// config event
|
||||
p.SetType(event.TypeFromString(configNotification))
|
||||
p.SetParser(frostfsEvent.ParseConfig)
|
||||
parsers = append(parsers, p)
|
||||
|
||||
return parsers
|
||||
}
|
||||
|
||||
// ListenerNotificationHandlers for the 'event.Listener' event producer.
|
||||
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
|
||||
var (
|
||||
handlers = make([]event.NotificationHandlerInfo, 0, 6)
|
||||
|
||||
h event.NotificationHandlerInfo
|
||||
)
|
||||
|
||||
h.SetScriptHash(np.frostfsContract)
|
||||
|
||||
// deposit handler
|
||||
h.SetType(event.TypeFromString(depositNotification))
|
||||
h.SetHandler(np.handleDeposit)
|
||||
handlers = append(handlers, h)
|
||||
|
||||
// withdraw handler
|
||||
h.SetType(event.TypeFromString(withdrawNotification))
|
||||
h.SetHandler(np.handleWithdraw)
|
||||
handlers = append(handlers, h)
|
||||
|
||||
// cheque handler
|
||||
h.SetType(event.TypeFromString(chequeNotification))
|
||||
h.SetHandler(np.handleCheque)
|
||||
handlers = append(handlers, h)
|
||||
|
||||
// config handler
|
||||
h.SetType(event.TypeFromString(configNotification))
|
||||
h.SetHandler(np.handleConfig)
|
||||
handlers = append(handlers, h)
|
||||
|
||||
return handlers
|
||||
return []event.NotificationHandlerInfo{
|
||||
{
|
||||
Contract: np.frostfsContract,
|
||||
Type: event.TypeFromString(depositNotification),
|
||||
Parser: frostfsEvent.ParseDeposit,
|
||||
Handlers: []event.Handler{np.handleDeposit},
|
||||
},
|
||||
{
|
||||
Contract: np.frostfsContract,
|
||||
Type: event.TypeFromString(withdrawNotification),
|
||||
Parser: frostfsEvent.ParseWithdraw,
|
||||
Handlers: []event.Handler{np.handleWithdraw},
|
||||
},
|
||||
{
|
||||
Contract: np.frostfsContract,
|
||||
Type: event.TypeFromString(chequeNotification),
|
||||
Parser: frostfsEvent.ParseCheque,
|
||||
Handlers: []event.Handler{np.handleCheque},
|
||||
},
|
||||
{
|
||||
Contract: np.frostfsContract,
|
||||
Type: event.TypeFromString(configNotification),
|
||||
Parser: frostfsEvent.ParseConfig,
|
||||
Handlers: []event.Handler{np.handleConfig},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ListenerNotaryParsers for the 'event.Listener' event producer.
|
||||
|
|
|
@ -28,21 +28,21 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
|
|||
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
sidechainAlphabet, err := gp.morphClient.Committee()
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
|
|||
err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// 2. Update NeoFSAlphabet role in the sidechain.
|
||||
|
@ -98,14 +98,14 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
|
|||
innerRing, err := gp.irFetcher.InnerRingKeys()
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
|
|||
|
||||
if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe
|
|||
err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
|
|||
err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
|
||||
if err != nil {
|
||||
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -155,22 +155,16 @@ func New(p *Params) (*Processor, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// ListenerNotificationParsers for the 'event.Listener' event producer.
|
||||
func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
|
||||
var pi event.NotificationParserInfo
|
||||
pi.SetScriptHash(gp.designate)
|
||||
pi.SetType(event.TypeFromString(native.DesignationEventName))
|
||||
pi.SetParser(rolemanagement.ParseDesignate)
|
||||
return []event.NotificationParserInfo{pi}
|
||||
}
|
||||
|
||||
// ListenerNotificationHandlers for the 'event.Listener' event producer.
|
||||
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
|
||||
var hi event.NotificationHandlerInfo
|
||||
hi.SetScriptHash(gp.designate)
|
||||
hi.SetType(event.TypeFromString(native.DesignationEventName))
|
||||
hi.SetHandler(gp.HandleAlphabetSync)
|
||||
return []event.NotificationHandlerInfo{hi}
|
||||
return []event.NotificationHandlerInfo{
|
||||
{
|
||||
Contract: gp.designate,
|
||||
Type: event.TypeFromString(native.DesignationEventName),
|
||||
Parser: rolemanagement.ParseDesignate,
|
||||
Handlers: []event.Handler{gp.HandleAlphabetSync},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ListenerNotaryParsers for the 'event.Listener' event producer.
|
||||
|
|
|
@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
|
|||
})
|
||||
if err != nil {
|
||||
np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
|
|||
epochDuration, err := np.netmapClient.EpochDuration()
|
||||
if err != nil {
|
||||
np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
} else {
|
||||
np.epochState.SetEpochDuration(epochDuration)
|
||||
}
|
||||
|
@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
|
|||
if err != nil {
|
||||
np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
|
||||
zap.String("hash", ev.TxHash().StringLE()),
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
|
||||
np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// get new netmap snapshot
|
||||
networkMap, err := np.netmapClient.NetMap()
|
||||
if err != nil {
|
||||
np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -42,7 +42,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
|
|||
err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
|
||||
if err != nil {
|
||||
np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
return false
|
||||
|
|
|
@ -161,36 +161,16 @@ func New(p *Params) (*Processor, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// ListenerNotificationParsers for the 'event.Listener' event producer.
|
||||
func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
|
||||
parsers := make([]event.NotificationParserInfo, 0, 3)
|
||||
|
||||
var p event.NotificationParserInfo
|
||||
|
||||
p.SetScriptHash(np.netmapClient.ContractAddress())
|
||||
|
||||
// new epoch event
|
||||
p.SetType(newEpochNotification)
|
||||
p.SetParser(netmapEvent.ParseNewEpoch)
|
||||
parsers = append(parsers, p)
|
||||
|
||||
return parsers
|
||||
}
|
||||
|
||||
// ListenerNotificationHandlers for the 'event.Listener' event producer.
|
||||
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
|
||||
handlers := make([]event.NotificationHandlerInfo, 0, 3)
|
||||
|
||||
var i event.NotificationHandlerInfo
|
||||
|
||||
i.SetScriptHash(np.netmapClient.ContractAddress())
|
||||
|
||||
// new epoch handler
|
||||
i.SetType(newEpochNotification)
|
||||
i.SetHandler(np.handleNewEpoch)
|
||||
handlers = append(handlers, i)
|
||||
|
||||
return handlers
|
||||
return []event.NotificationHandlerInfo{
|
||||
{
|
||||
Contract: np.netmapClient.ContractAddress(),
|
||||
Type: newEpochNotification,
|
||||
Parser: netmapEvent.ParseNewEpoch,
|
||||
Handlers: []event.Handler{np.handleNewEpoch},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ListenerNotaryParsers for the 'event.Listener' event producer.
|
||||
|
|
|
@ -62,7 +62,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool {
|
|||
func (s *Server) InnerRingIndex(ctx context.Context) int {
|
||||
index, err := s.statusIndex.InnerRingIndex()
|
||||
if err != nil {
|
||||
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
|
||||
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
|
||||
return -1
|
||||
}
|
||||
|
||||
|
@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
|
|||
func (s *Server) InnerRingSize(ctx context.Context) int {
|
||||
size, err := s.statusIndex.InnerRingSize()
|
||||
if err != nil {
|
||||
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
|
||||
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
|
||||
return 0
|
||||
}
|
||||
|
||||
|
@ -86,7 +86,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
|
|||
func (s *Server) AlphabetIndex(ctx context.Context) int {
|
||||
index, err := s.statusIndex.AlphabetIndex()
|
||||
if err != nil {
|
||||
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
|
||||
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
|
||||
return -1
|
||||
}
|
||||
|
||||
|
@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
|
|||
s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
|
||||
zap.Int8("alphabet_index", int8(letter)),
|
||||
zap.Uint64("epoch", epoch),
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
}
|
||||
})
|
||||
|
||||
|
|
|
@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
|
|||
})
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't determine DB size: %w", err)
|
||||
return fmt.Errorf("determine DB size: %w", err)
|
||||
}
|
||||
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
|
||||
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
|
||||
|
@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
|
|||
return saveItemsCount(tx, items)
|
||||
}); err != nil {
|
||||
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
|
||||
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
|
||||
return fmt.Errorf("save blobovnicza's size and items count: %w", err)
|
||||
}
|
||||
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
|
||||
}
|
||||
|
|
|
@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
|
|||
if prm.ignoreErrors {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("could not decode address key: %w", err)
|
||||
return fmt.Errorf("decode address key: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -82,7 +82,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
|
|||
if !client.IsErrObjectNotFound(err) {
|
||||
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
|
||||
zap.String("level", p),
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
|
||||
)
|
||||
}
|
||||
|
|
|
@ -57,7 +57,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
|
|||
if !client.IsErrObjectNotFound(err) {
|
||||
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
|
||||
zap.String("level", p),
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
|
|||
if !client.IsErrObjectNotFound(err) {
|
||||
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
|
||||
zap.String("level", p),
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
|
||||
)
|
||||
}
|
||||
|
@ -115,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
|
|||
// decompress the data
|
||||
data, err := b.compression.Decompress(res.Object())
|
||||
if err != nil {
|
||||
return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
|
||||
return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
|
||||
}
|
||||
|
||||
// unmarshal the object
|
||||
obj := objectSDK.New()
|
||||
if err := obj.Unmarshal(data); err != nil {
|
||||
return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
|
||||
return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
|
||||
}
|
||||
|
||||
return common.GetRes{Object: obj, RawData: data}, nil
|
||||
|
|
|
@ -71,7 +71,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
|
|||
if !outOfBounds && !client.IsErrObjectNotFound(err) {
|
||||
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
|
||||
zap.String("level", p),
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
}
|
||||
if outOfBounds {
|
||||
|
@ -130,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
|
|||
// decompress the data
|
||||
data, err := b.compression.Decompress(res.Object())
|
||||
if err != nil {
|
||||
return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
|
||||
return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
|
||||
}
|
||||
|
||||
// unmarshal the object
|
||||
obj := objectSDK.New()
|
||||
if err := obj.Unmarshal(data); err != nil {
|
||||
return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
|
||||
return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
|
||||
}
|
||||
|
||||
from := prm.Range.GetOffset()
|
||||
|
|
|
@ -44,12 +44,12 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
|
|||
if prm.IgnoreErrors {
|
||||
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
|
||||
zap.Stringer("address", elem.Address()),
|
||||
zap.String("err", err.Error()),
|
||||
zap.Error(err),
|
||||
zap.String("storage_id", p),
|
||||
zap.String("root_path", b.rootPath))
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("could not decompress object data: %w", err)
|
||||
return fmt.Errorf("decompress object data: %w", err)
|
||||
}
|
||||
|
||||
if prm.Handler != nil {
|
||||
|
@ -77,12 +77,12 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
 		if err != nil {
 			if ignoreErrors {
 				b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
-					zap.String("err", err.Error()),
+					zap.Error(err),
 					zap.String("storage_id", p),
 					zap.String("root_path", b.rootPath))
 				return false, nil
 			}
-			return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
+			return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
 		}
 		defer shBlz.Close(ctx)
 
@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
 	)...)
 
 	if err := blz.Open(ctx); err != nil {
-		return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
+		return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
 	}
 	if err := blz.Init(ctx); err != nil {
-		return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
+		return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
 	}
 
 	b.refCount++
@ -97,7 +97,7 @@ func (b *sharedDB) Close(ctx context.Context) {
 		if err := b.blcza.Close(ctx); err != nil {
 			b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 				zap.String("id", b.path),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 		b.blcza = nil
@ -125,9 +125,9 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
 		if err := b.blcza.Close(ctx); err != nil {
 			b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 				zap.String("id", b.path),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
-			return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
+			return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
 		}
 
 		b.refCount = 0
@ -83,7 +83,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 		} else {
 			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
-				zap.String("error", err.Error()),
+				zap.Error(err),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		}
 
@ -106,7 +106,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 		} else {
 			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
 				zap.String("path", active.SystemPath()),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		}
 		if errors.Is(err, blobovnicza.ErrNoSpace) {
@ -74,7 +74,7 @@ func (b *BlobStor) Close(ctx context.Context) error {
 	for i := range b.storage {
 		err := b.storage[i].Storage.Close(ctx)
 		if err != nil {
-			b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
+			b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
 			if firstErr == nil {
 				firstErr = err
 			}
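BlobStor.Close above keeps closing every sub-storage even after a failure, logging each error and returning only the first one. A hedged, stand-alone sketch of that pattern with hypothetical types:

package main

import (
	"errors"
	"fmt"
	"io"
)

// closeAll closes every closer and returns the first error encountered,
// so a failing storage does not prevent the remaining ones from closing.
func closeAll(closers []io.Closer) error {
	var firstErr error
	for _, c := range closers {
		if err := c.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

type fakeStorage struct{ name string }

func (f fakeStorage) Close() error { return errors.New("close " + f.name + ": busy") }

func main() {
	err := closeAll([]io.Closer{fakeStorage{"blobovnicza"}, fakeStorage{"fstree"}})
	fmt.Println(err) // close blobovnicza: busy
}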
@ -75,7 +75,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
 		for _, err := range errors[:len(errors)-1] {
 			b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
 				zap.Stringer("address", prm.Address),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		}
 
@ -153,7 +153,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 	if err != nil {
 		if prm.IgnoreErrors {
 			t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
-				zap.String("err", err.Error()),
+				zap.Error(err),
 				zap.String("directory_path", dirPath))
 			return nil
 		}
@ -202,7 +202,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 			if prm.IgnoreErrors {
 				t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 					zap.Stringer("address", addr),
-					zap.String("err", err.Error()),
+					zap.Error(err),
 					zap.String("path", path))
 				continue
 			}
@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
 		},
 	)
 	if err != nil {
-		return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+		return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
 	}
 
 	return count, size, nil
@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
 		},
 	)
 	if err != nil {
-		return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+		return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
 	}
 	success = true
 	return result, nil
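The countFiles and ObjectsCount hunks above only change the message of the error wrapped around a directory walk. As a rough illustration of what such a walk does, here is a sketch using the standard library rather than the project's own iterator (names are illustrative, not FrostFS APIs):

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// countFiles returns the number of regular files under root and their total size.
func countFiles(root string) (uint64, uint64, error) {
	var count, size uint64
	err := filepath.WalkDir(root, func(_ string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		count++
		size += uint64(info.Size())
		return nil
	})
	if err != nil {
		return 0, 0, fmt.Errorf("walk through %s directory: %w", root, err)
	}
	return count, size, nil
}

func main() {
	fmt.Println(countFiles(os.TempDir()))
}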
@ -45,7 +45,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
 			b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 				zap.String("storage_path", b.storage[i].Storage.Path()),
 				zap.String("storage_type", b.storage[i].Storage.Type()),
-				zap.String("err", err.Error()))
+				zap.Error(err))
 			continue
 		}
 		return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
 	// Decompress the data.
 	var err error
 	if data, err = s.compression.Decompress(data); err != nil {
-		return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+		return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
 	}
 
 	// Unmarshal the SDK object.
 	obj := objectSDK.New()
 	if err := obj.Unmarshal(data); err != nil {
-		return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+		return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
 	}
 
 	return common.GetRes{Object: obj, RawData: data}, nil
@ -27,7 +27,7 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
 		}
 	}
 	if err != nil {
-		return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
+		return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
 	}
 
 	b.mode = m
@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
 		// marshal object
 		data, err := prm.Object.Marshal()
 		if err != nil {
-			return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
+			return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
 		}
 		prm.RawData = data
 	}
@ -95,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	err := eg.Wait()
 	close(errCh)
 	if err != nil {
-		return fmt.Errorf("failed to initialize shards: %w", err)
+		return fmt.Errorf("initialize shards: %w", err)
 	}
 
 	for res := range errCh {
@ -117,7 +117,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 
 				continue
 			}
-			return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
+			return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
 		}
 	}
 
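StorageEngine.Init above runs shard initialization through an errgroup and collects per-shard failures on errCh so they can be reported with the shard ID. A simplified, hypothetical sketch of that fan-out/fan-in shape (not the engine's real types):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type shardInitError struct {
	id  string
	err error
}

func initShards(ctx context.Context, ids []string) error {
	errCh := make(chan shardInitError, len(ids))
	eg, _ := errgroup.WithContext(ctx)
	for _, id := range ids {
		id := id // capture for pre-Go 1.22 loop semantics
		eg.Go(func() error {
			// A real implementation would open the metabase, blobstor, etc. here.
			if id == "shard-2" {
				errCh <- shardInitError{id: id, err: errors.New("metabase: file not found")}
			}
			return nil
		})
	}
	err := eg.Wait()
	close(errCh)
	if err != nil {
		return fmt.Errorf("initialize shards: %w", err)
	}
	for res := range errCh {
		return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
	}
	return nil
}

func main() {
	fmt.Println(initShards(context.Background(), []string{"shard-1", "shard-2"}))
}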
@ -167,7 +167,7 @@ func (e *StorageEngine) close(ctx context.Context, releasePools bool) error {
 		if err := sh.Close(ctx); err != nil {
 			e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
 				zap.String("id", id),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 	}
@ -320,7 +320,7 @@ loop:
 	for _, newID := range shardsToAdd {
 		sh, err := e.createShard(ctx, rcfg.shards[newID])
 		if err != nil {
-			return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
+			return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
 		}
 
 		idStr := sh.ID().String()
@ -331,13 +331,13 @@ loop:
 		}
 		if err != nil {
 			_ = sh.Close(ctx)
-			return fmt.Errorf("could not init %s shard: %w", idStr, err)
+			return fmt.Errorf("init %s shard: %w", idStr, err)
 		}
 
 		err = e.addShard(sh)
 		if err != nil {
 			_ = sh.Close(ctx)
-			return fmt.Errorf("could not add %s shard: %w", idStr, err)
+			return fmt.Errorf("add %s shard: %w", idStr, err)
 		}
 
 		e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
@ -154,7 +154,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 		if err != nil {
 			e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
 				zap.Stringer("addr", addr),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 			return false
 		}
@ -166,7 +166,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 		if err != nil {
 			e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
 				zap.Stringer("addr", addr),
-				zap.String("err", err.Error()),
+				zap.Error(err),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 			continue
 		}
@ -196,7 +196,7 @@ func (e *StorageEngine) deleteChunks(
 		if err != nil {
 			e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
 				zap.Stringer("addr", addr),
-				zap.String("err", err.Error()),
+				zap.Error(err),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 			continue
 		}
@ -140,7 +140,7 @@ func (e *StorageEngine) reportShardError(
 	if isLogical(err) {
 		e.log.Warn(ctx, msg,
 			zap.Stringer("shard_id", sh.ID()),
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return
 	}
 
@ -151,7 +151,7 @@ func (e *StorageEngine) reportShardError(
 	e.log.Warn(ctx, msg, append([]zap.Field{
 		zap.Stringer("shard_id", sid),
 		zap.Uint32("error count", errCount),
-		zap.String("error", err.Error()),
+		zap.Error(err),
 	}, fields...)...)
 
 	if e.errorsThreshold == 0 || errCount < e.errorsThreshold {
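reportShardError above logs each failure together with the shard ID and the running error counter; once errorsThreshold is reached the engine takes the shard out of normal operation. A rough sketch of that counting logic with hypothetical names (the real engine switches the shard's mode, not a boolean flag):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

type shard struct {
	id       string
	errCount atomic.Uint32
	degraded atomic.Bool
}

// reportShardError counts errors per shard and trips the shard into degraded
// mode once the threshold is reached; a threshold of 0 disables the switch.
func reportShardError(sh *shard, threshold uint32, err error) {
	count := sh.errCount.Add(1)
	fmt.Printf("shard %s: %v (error count %d)\n", sh.id, err, count)
	if threshold == 0 || count < threshold {
		return
	}
	sh.degraded.Store(true)
	fmt.Printf("shard %s moved to degraded mode\n", sh.id)
}

func main() {
	sh := &shard{id: "shard-1"}
	for i := 0; i < 3; i++ {
		reportShardError(sh, 3, errors.New("metabase: disk I/O error"))
	}
}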
@ -17,10 +17,12 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-type epochState struct{}
+type epochState struct {
+	currEpoch uint64
+}
 
 func (s epochState) CurrentEpoch() uint64 {
-	return 0
+	return s.currEpoch
 }
 
 type testEngineWrapper struct {
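The epochState test stub above gains a currEpoch field, so engine tests can run against an arbitrary current epoch instead of a hard-coded 0. A small illustration of the idea outside the test suite; the interface and helper names here are simplified assumptions, not the metabase's real API:

package main

import "fmt"

// EpochState is a simplified stand-in for the interface the storage layer expects.
type EpochState interface {
	CurrentEpoch() uint64
}

// epochState is a configurable test stub.
type epochState struct {
	currEpoch uint64
}

func (s epochState) CurrentEpoch() uint64 {
	return s.currEpoch
}

func isExpired(objExpirationEpoch uint64, st EpochState) bool {
	return objExpirationEpoch < st.CurrentEpoch()
}

func main() {
	// Expiration logic can now be exercised at any epoch, not only at epoch 0.
	fmt.Println(isExpired(5, epochState{currEpoch: 10}))  // true
	fmt.Println(isExpired(15, epochState{currEpoch: 10})) // false
}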
@ -87,12 +89,16 @@ func testGetDefaultShardOptions(t testing.TB) []shard.Option {
 			blobstor.WithLogger(test.NewLogger(t)),
 		),
 		shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
-		shard.WithMetaBaseOptions(
-			meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
-			meta.WithPermissions(0o700),
-			meta.WithEpochState(epochState{}),
-			meta.WithLogger(test.NewLogger(t)),
-		),
+		shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...),
 	}
 }
 
+func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option {
+	return []meta.Option{
+		meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+		meta.WithPermissions(0o700),
+		meta.WithEpochState(epochState{}),
+		meta.WithLogger(test.NewLogger(t)),
+	}
+}
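The shard test options above now take their metabase options from testGetDefaultMetabaseOptions and spread them into shard.WithMetaBaseOptions(...). A toy sketch of that options-slice pattern with hypothetical Option and config types, not the project's own:

package main

import "fmt"

type config struct {
	path  string
	perms uint32
}

// Option mirrors the meta.Option / shard.Option functional-options style.
type Option func(*config)

func WithPath(p string) Option        { return func(c *config) { c.path = p } }
func WithPermissions(m uint32) Option { return func(c *config) { c.perms = m } }

// defaultOptions plays the role of testGetDefaultMetabaseOptions:
// the defaults are defined once and callers spread them with "...".
func defaultOptions(dir string) []Option {
	return []Option{
		WithPath(dir + "/metabase"),
		WithPermissions(0o700),
	}
}

func newConfig(opts ...Option) *config {
	c := &config{}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", newConfig(defaultOptions("/tmp/test")...))
}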
@ -86,7 +86,6 @@ type EvacuateShardPrm struct {
 	ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error)
 	TreeHandler    func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error)
 	IgnoreErrors   bool
-	Async          bool
 	Scope          EvacuateScope
 	RepOneOnly     bool
 
@ -211,10 +210,10 @@ var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
 
 // Evacuate moves data from one shard to the others.
 // The shard being moved must be in read-only mode.
-func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) {
+func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error {
 	select {
 	case <-ctx.Done():
-		return nil, ctx.Err()
+		return ctx.Err()
 	default:
 	}
 
@ -226,7 +225,6 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
 	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate",
 		trace.WithAttributes(
 			attribute.StringSlice("shardIDs", shardIDs),
-			attribute.Bool("async", prm.Async),
 			attribute.Bool("ignoreErrors", prm.IgnoreErrors),
 			attribute.Stringer("scope", prm.Scope),
 		))
@ -234,7 +232,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
 
 	shards, err := e.getActualShards(shardIDs, prm)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	shardsToEvacuate := make(map[string]*shard.Shard)
@ -247,10 +245,10 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
 	}
 
 	res := NewEvacuateShardRes()
-	ctx = ctxOrBackground(ctx, prm.Async)
-	eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
+	ctx = context.WithoutCancel(ctx)
+	eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	var mtx sync.RWMutex
@ -262,21 +260,10 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
 		return t
 	}
 	eg.Go(func() error {
-		return e.evacuateShards(egCtx, shardIDs, prm, res, copyShards, shardsToEvacuate)
+		return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate)
 	})
 
-	if prm.Async {
-		return nil, nil
-	}
-
-	return res, eg.Wait()
-}
-
-func ctxOrBackground(ctx context.Context, background bool) context.Context {
-	if background {
-		return context.Background()
-	}
-	return ctx
+	return nil
 }
 
 func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
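After these hunks Evacuate no longer has a synchronous mode: the Async flag is gone, progress is tracked by the evacuation limiter rather than returned to the caller, and the work runs on a context derived with context.WithoutCancel, so the caller going away does not abort an evacuation in flight. A hedged sketch of that shape with hypothetical names:

package main

import (
	"context"
	"fmt"
	"time"
)

// startEvacuation accepts the request and returns immediately; progress would
// be observed through a separate status API, mirroring the error-only signature.
func startEvacuation(ctx context.Context, shardIDs []string) error {
	// Keep the context values (trace IDs and the like) but detach from the
	// caller's cancellation and deadline: cancelling the RPC no longer stops
	// the evacuation.
	ctx = context.WithoutCancel(ctx)

	go func() {
		for _, id := range shardIDs {
			select {
			case <-ctx.Done(): // never fires for the caller's cancel now
				return
			default:
			}
			fmt.Println("evacuating shard", id)
			time.Sleep(10 * time.Millisecond) // stand-in for real object moves
		}
	}()
	return nil
}

func main() {
	callerCtx, cancel := context.WithCancel(context.Background())
	cancel() // even a cancelled caller context does not stop the work
	fmt.Println(startEvacuation(callerCtx, []string{"shard-1"}))
	time.Sleep(50 * time.Millisecond)
}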
@ -286,7 +273,6 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
 	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
 		trace.WithAttributes(
 			attribute.StringSlice("shardIDs", shardIDs),
-			attribute.Bool("async", prm.Async),
 			attribute.Bool("ignoreErrors", prm.IgnoreErrors),
 			attribute.Stringer("scope", prm.Scope),
 			attribute.Bool("repOneOnly", prm.RepOneOnly),
@ -592,7 +578,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
 
 func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
 	if prm.TreeHandler == nil {
-		return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
+		return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
 	}
 
 	return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)
Some files were not shown because too many files have changed in this diff.