Compare commits
No commits in common. "master" and "master" have entirely different histories.
148 changed files with 598 additions and 1051 deletions
Makefile (10 changed lines)
@@ -270,12 +270,10 @@ env-up: all
 		echo "Frostfs contracts not found"; exit 1; \
 	fi
 	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
-	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
-		--storage-wallet ./dev/storage/wallet01.json \
-		--storage-wallet ./dev/storage/wallet02.json \
-		--storage-wallet ./dev/storage/wallet03.json \
-		--storage-wallet ./dev/storage/wallet04.json
-
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
 	@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
 		make locode-download; \
 	fi
@@ -12,6 +12,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract"

@@ -140,29 +141,60 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
 }
 
 func generateStorageCreds(cmd *cobra.Command, _ []string) error {
-	walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
-	w, err := wallet.NewWallet(walletPath)
-	if err != nil {
-		return fmt.Errorf("create wallet: %w", err)
-	}
-
-	label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
-	password, err := config.GetStoragePassword(viper.GetViper(), label)
-	if err != nil {
-		return fmt.Errorf("can't fetch password: %w", err)
-	}
-
-	if label == "" {
-		label = constants.SingleAccountName
-	}
-
-	if err := w.CreateAccount(label, password); err != nil {
-		return fmt.Errorf("can't create account: %w", err)
-	}
-	return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
+	return refillGas(cmd, storageGasConfigFlag, true)
 }
 
-func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
+func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
+	// storage wallet path is not part of the config
+	storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
+	// wallet address is not part of the config
+	walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
+
+	var gasReceiver util.Uint160
+
+	if len(walletAddress) != 0 {
+		gasReceiver, err = address.StringToUint160(walletAddress)
+		if err != nil {
+			return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
+		}
+	} else {
+		if storageWalletPath == "" {
+			return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
+		}
+
+		var w *wallet.Wallet
+
+		if createWallet {
+			w, err = wallet.NewWallet(storageWalletPath)
+		} else {
+			w, err = wallet.NewWalletFromFile(storageWalletPath)
+		}
+
+		if err != nil {
+			return fmt.Errorf("can't create wallet: %w", err)
+		}
+
+		if createWallet {
+			var password string
+
+			label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
+			password, err = config.GetStoragePassword(viper.GetViper(), label)
+			if err != nil {
+				return fmt.Errorf("can't fetch password: %w", err)
+			}
+
+			if label == "" {
+				label = constants.SingleAccountName
+			}
+
+			if err := w.CreateAccount(label, password); err != nil {
+				return fmt.Errorf("can't create account: %w", err)
+			}
+		}
+
+		gasReceiver = w.Accounts[0].Contract.ScriptHash()
+	}
+
 	gasStr := viper.GetString(gasFlag)
 
 	gasAmount, err := helper.ParseGASAmount(gasStr)

@@ -176,11 +208,9 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160)
 	}
 
 	bw := io.NewBufBinWriter()
-	for _, gasReceiver := range gasReceivers {
-		emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
-			wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
-		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
-	}
+	emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
+		wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
+	emit.Opcodes(bw.BinWriter, opcode.ASSERT)
 	if bw.Err != nil {
 		return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
 	}
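Note: the deleted side of refillGas takes several receivers and emits one NEP-17 transfer per receiver into a single script, with an ASSERT after each call so the whole transaction fails if any transfer returns false. A minimal sketch of that batching pattern, using the same neo-go packages that appear in the hunk above (sender and receiver hashes are placeholders):

    // buildTransferScript batches NEP-17 GAS transfers into one script.
    func buildTransferScript(sender util.Uint160, receivers []util.Uint160, amount int64) ([]byte, error) {
    	bw := io.NewBufBinWriter()
    	for _, r := range receivers {
    		// transfer(from, to, amount, data)
    		emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, sender, r, amount, nil)
    		// abort the script if the previous call returned false
    		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
    	}
    	if bw.Err != nil {
    		return nil, fmt.Errorf("build transfer script: %w", bw.Err)
    	}
    	return bw.Bytes(), nil
    }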
@@ -1,12 +1,7 @@
 package generate
 
 import (
-	"fmt"
-
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )

@@ -38,27 +33,7 @@ var (
 			_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
 		},
 		RunE: func(cmd *cobra.Command, _ []string) error {
-			storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
-			walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
-
-			var gasReceivers []util.Uint160
-			for _, walletAddress := range walletAddresses {
-				addr, err := address.StringToUint160(walletAddress)
-				if err != nil {
-					return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
-				}
-
-				gasReceivers = append(gasReceivers, addr)
-			}
-			for _, storageWalletPath := range storageWalletPaths {
-				w, err := wallet.NewWalletFromFile(storageWalletPath)
-				if err != nil {
-					return fmt.Errorf("can't create wallet: %w", err)
-				}
-
-				gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
-			}
-			return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
+			return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
 		},
 	}
 	GenerateAlphabetCmd = &cobra.Command{

@@ -75,10 +50,10 @@ var (
 func initRefillGasCmd() {
 	RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 	RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
-	RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
+	RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
+	RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
 	RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
-	RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
+	RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
 }
 
 func initGenerateStorageCmd() {
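Note: the flag-group constraint changes meaning here. A small cobra sketch of both helpers (command and flag names are placeholders; MarkFlagsOneRequired needs cobra v1.8+):

    cmd := &cobra.Command{Use: "refill-gas"}
    cmd.Flags().String("storage-wallet", "", "path to storage node wallet")
    cmd.Flags().String("wallet-address", "", "address of wallet")

    // Deleted side: the user must pass at least one of the two flags.
    cmd.MarkFlagsOneRequired("wallet-address", "storage-wallet")

    // Added side: the flags cannot be combined, and neither is required.
    cmd.MarkFlagsMutuallyExclusive("wallet-address", "storage-wallet")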
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
+	"strconv"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"

@@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
 	}
 
 	accHash := w.GetChangeAddress()
-	addr, _ := cmd.Flags().GetString(walletAccountFlag)
-	if addr != "" {
+	if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
 		accHash, err = address.StringToUint160(addr)
 		if err != nil {
 			return fmt.Errorf("invalid address: %s", addr)

@@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
 		return fmt.Errorf("can't find account for %s", accHash)
 	}
 
-	prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
+	prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
 	pass, err := input.ReadPassword(prompt)
 	if err != nil {
 		return fmt.Errorf("can't get password: %v", err)

@@ -73,9 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 
-	till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
-	if till <= 0 {
-		return errInvalidNotaryDepositLifetime
+	till := int64(defaultNotaryDepositLifetime)
+	tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
+	if err != nil {
+		return err
+	}
+	if tillStr != "" {
+		till, err = strconv.ParseInt(tillStr, 10, 64)
+		if err != nil || till <= 0 {
+			return errInvalidNotaryDepositLifetime
+		}
 	}
 
 	return transferGas(cmd, acc, accHash, gasAmount, till)

@@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
 	DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
 	DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
 	DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
-	DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
+	DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
 }
 
 func init() {
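Note: the deposit hunks above swap an Int64 flag for a String flag plus manual parsing; a string lets the code tell "not set" apart from an explicit value before applying its own default. A sketch using fmt, strconv and github.com/spf13/pflag (flag names are placeholders):

    fs := pflag.NewFlagSet("deposit", pflag.ContinueOnError)
    asInt := fs.Int64("till-int", 10, "lifetime in blocks (numeric flag)")
    asStr := fs.String("till-str", "", "lifetime in blocks (string flag)")
    _ = fs.Parse([]string{"--till-str", "42"})

    // Numeric flag: unset silently reports its default; pflag rejects non-numeric input.
    fmt.Println(*asInt) // 10

    // String flag: empty means "not set", so the caller parses and validates itself.
    if *asStr != "" {
    	till, err := strconv.ParseInt(*asStr, 10, 64)
    	fmt.Println(till, err) // 42 <nil>
    }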
@@ -105,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
 		fatalOnErr(errors.New("can't find account in wallet"))
 	}
 
-	c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
+	c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
 	fatalOnErr(err)
 
 	err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
@@ -9,6 +9,7 @@ import (
 	"io"
 	"os"
 	"slices"
+	"sort"
 	"strings"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"

@@ -77,31 +78,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
 // SortedIDList returns sorted list of identifiers of user's containers.
 func (x ListContainersRes) SortedIDList() []cid.ID {
 	list := x.cliRes.Containers()
-	slices.SortFunc(list, func(lhs, rhs cid.ID) int {
-		return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
+	sort.Slice(list, func(i, j int) bool {
+		lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
+		return strings.Compare(lhs, rhs) < 0
 	})
 	return list
 }
 
-func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
-	cliPrm := &client.PrmContainerListStream{
-		XHeaders: prm.XHeaders,
-		OwnerID:  prm.OwnerID,
-		Session:  prm.Session,
-	}
-	rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
-	if err != nil {
-		return fmt.Errorf("init container list: %w", err)
-	}
-
-	err = rdr.Iterate(processCnr)
-	if err != nil {
-		return fmt.Errorf("read container list: %w", err)
-	}
-
-	return
-}
-
 // PutContainerPrm groups parameters of PutContainer operation.
 type PutContainerPrm struct {
 	Client *client.Client
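Note: both sorts order container IDs by their string encoding; slices.SortFunc (Go 1.21+) takes a three-way comparator, while sort.Slice takes a less function over indices. A runnable sketch over plain strings:

    package main

    import (
    	"fmt"
    	"slices"
    	"sort"
    	"strings"
    )

    func main() {
    	a := []string{"c", "a", "b"}
    	slices.SortFunc(a, func(lhs, rhs string) int { return strings.Compare(lhs, rhs) })

    	b := []string{"c", "a", "b"}
    	sort.Slice(b, func(i, j int) bool { return strings.Compare(b[i], b[j]) < 0 })

    	fmt.Println(a, b) // [a b c] [a b c]
    }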
@@ -6,11 +6,8 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/spf13/cobra"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 )
 
 // flags of list command.

@@ -54,60 +51,44 @@ var listContainersCmd = &cobra.Command{
 
 		var prm internalclient.ListContainersPrm
 		prm.SetClient(cli)
-		prm.OwnerID = idUser
+		prm.Account = idUser
 
+		res, err := internalclient.ListContainers(cmd.Context(), prm)
+		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
 		prmGet := internalclient.GetContainerPrm{
 			Client: cli,
 		}
-		var containerIDs []cid.ID
-
-		err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
-			printContainer(cmd, prmGet, id)
-			return false
-		})
-		if err == nil {
-			return
-		}
-
-		if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
-			res, err := internalclient.ListContainers(cmd.Context(), prm)
-			commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-			containerIDs = res.SortedIDList()
-		} else {
-			commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-		}
-
+		containerIDs := res.SortedIDList()
 		for _, cnrID := range containerIDs {
-			printContainer(cmd, prmGet, cnrID)
+			if flagVarListName == "" && !flagVarListPrintAttr {
+				cmd.Println(cnrID.String())
+				continue
+			}
+
+			prmGet.ClientParams.ContainerID = &cnrID
+			res, err := internalclient.GetContainer(cmd.Context(), prmGet)
+			if err != nil {
+				cmd.Printf("  failed to read attributes: %v\n", err)
+				continue
+			}
+
+			cnr := res.Container()
+			if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
+				continue
+			}
+			cmd.Println(cnrID.String())
+
+			if flagVarListPrintAttr {
+				cnr.IterateUserAttributes(func(key, val string) {
+					cmd.Printf("  %s: %s\n", key, val)
+				})
+			}
 		}
 	},
 }
 
-func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
-	if flagVarListName == "" && !flagVarListPrintAttr {
-		cmd.Println(id.String())
-		return
-	}
-
-	prmGet.ClientParams.ContainerID = &id
-	res, err := internalclient.GetContainer(cmd.Context(), prmGet)
-	if err != nil {
-		cmd.Printf("  failed to read attributes: %v\n", err)
-		return
-	}
-
-	cnr := res.Container()
-	if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
-		return
-	}
-	cmd.Println(id.String())
-
-	if flagVarListPrintAttr {
-		cnr.IterateUserAttributes(func(key, val string) {
-			cmd.Printf("  %s: %s\n", key, val)
-		})
-	}
-}
-
 func initContainerListContainersCmd() {
 	commonflags.Init(listContainersCmd)
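Note: the deleted RunE body is a probe-and-fall-back shape: try the streaming RPC first and fall back to the unary call only when the server reports codes.Unimplemented; any other error is fatal. A reduced sketch of that pattern (tryStream and fallback are placeholder callbacks):

    func listWithFallback(tryStream, fallback func() error) error {
    	err := tryStream()
    	if err == nil {
    		return nil
    	}
    	// Old servers without the streaming RPC answer with codes.Unimplemented.
    	if s, ok := status.FromError(err); ok && s.Code() == codes.Unimplemented {
    		return fallback()
    	}
    	return err
    }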
@@ -9,6 +9,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/spf13/cobra"

@@ -42,8 +43,6 @@ func initObjectHashCmd() {
 	_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
 
 	flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
-	_ = objectHashCmd.MarkFlagRequired("range")
-
 	flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
 	flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
 }

@@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
 	pk := key.GetOrGenerate(cmd)
 	cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
 
+	tz := typ == hashTz
+	fullHash := len(ranges) == 0
+	if fullHash {
+		var headPrm internalclient.HeadObjectPrm
+		headPrm.SetClient(cli)
+		Prepare(cmd, &headPrm)
+		headPrm.SetAddress(objAddr)
+
+		// get hash of full payload through HEAD (may be user can do it through dedicated command?)
+		res, err := internalclient.HeadObject(cmd.Context(), headPrm)
+		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+		var cs checksum.Checksum
+		var csSet bool
+
+		if tz {
+			cs, csSet = res.Header().PayloadHomomorphicHash()
+		} else {
+			cs, csSet = res.Header().PayloadChecksum()
+		}
+
+		if csSet {
+			cmd.Println(hex.EncodeToString(cs.Value()))
+		} else {
+			cmd.Println("Missing checksum in object header.")
+		}
+
+		return
+	}
+
 	var hashPrm internalclient.HashPayloadRangesPrm
 	hashPrm.SetClient(cli)
 	Prepare(cmd, &hashPrm)

@@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
 	hashPrm.SetSalt(salt)
 	hashPrm.SetRanges(ranges)
 
-	if typ == hashTz {
+	if tz {
 		hashPrm.TZ()
 	}
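Note: the added branch skips hashing entirely when no ranges are requested and reads the precomputed payload checksum from the object header fetched via HEAD. A reduced sketch of that decision, built only from the two SDK getters visible in the hunk (objectSDK is assumed to alias the frostfs-sdk-go object package):

    // headerChecksum picks the homomorphic (Tillich-Zemor) or SHA-256 payload
    // checksum from an object header; the boolean reports whether it was set.
    func headerChecksum(hdr *objectSDK.Object, tz bool) (checksum.Checksum, bool) {
    	if tz {
    		return hdr.PayloadHomomorphicHash()
    	}
    	return hdr.PayloadChecksum()
    }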
@@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
 	log.Info(ctx, c.name+" config updated")
 	if err := c.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	} else {
 		c.init(ctx)
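Note: this is the first of many hunks replacing zap.Error(err) with zap.String("error", err.Error()). Both attach the message under the key "error"; zap.Error is the shorthand and also tolerates a nil error, while err.Error() would panic on nil. A runnable sketch:

    package main

    import (
    	"errors"

    	"go.uber.org/zap"
    )

    func main() {
    	log, _ := zap.NewDevelopment()
    	defer log.Sync()

    	err := errors.New("listener closed")
    	// Both lines log the field error="listener closed".
    	log.Debug("could not shutdown HTTP server", zap.Error(err))
    	log.Debug("could not shutdown HTTP server", zap.String("error", err.Error()))
    }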
@@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
 	innerRing.Stop(ctx)
 	if err := metricsCmp.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	}
 	if err := pprofCmp.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	}
 
@@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
 	log.Info(ctx, c.name+" config updated")
 	if err := c.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err))
+			zap.String("error", err.Error()))
 		return
 	}
@@ -606,10 +606,9 @@ type cfgAccounting struct {
 type cfgContainer struct {
 	scriptHash neogoutil.Uint160
 
 	parsers     map[event.Type]event.NotificationParser
 	subscribers map[event.Type][]event.Handler
 	workerPool  util.WorkerPool // pool for asynchronous handlers
-	containerBatchSize uint32
 }
 
 type cfgFrostfsID struct {

@@ -1120,7 +1119,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 		err := ls.Close(context.WithoutCancel(ctx))
 		if err != nil {
 			c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
-				zap.Error(err),
+				zap.String("error", err.Error()),
 			)
 		} else {
 			c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)

@@ -1210,7 +1209,7 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
 	if err != nil {
 		c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
 			zap.Uint64("epoch", epoch),
-			zap.Error(err))
+			zap.String("error", err.Error()))
 		return
 	}
 

@@ -1220,9 +1219,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
 // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
 // with the binary-encoded information from the current node's configuration.
 // The state is set using the provided setter which MUST NOT be nil.
-func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
+func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
 	ni := c.cfgNodeInfo.localInfo
-	ni.SetStatus(state)
+	stateSetter(&ni)
 
 	prm := nmClient.AddPeerPrm{}
 	prm.SetNodeInfo(ni)

@@ -1232,7 +1231,9 @@ func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) er
 
 // bootstrapOnline calls cfg.bootstrapWithState with "online" state.
 func bootstrapOnline(ctx context.Context, c *cfg) error {
-	return c.bootstrapWithState(ctx, netmap.Online)
+	return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
+		ni.SetStatus(netmap.Online)
+	})
 }
 
 // bootstrap calls bootstrapWithState with:

@@ -1243,7 +1244,9 @@ func (c *cfg) bootstrap(ctx context.Context) error {
 	st := c.cfgNetmap.state.controlNetmapStatus()
 	if st == control.NetmapStatus_MAINTENANCE {
 		c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
-		return c.bootstrapWithState(ctx, netmap.Maintenance)
+		return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
+			ni.SetStatus(netmap.Maintenance)
+		})
 	}
 
 	c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
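Note: the bootstrap hunks swap a state value parameter for a setter callback. The callback variant leaves the mutation to the caller, at the cost of a closure per call site. A reduced sketch with stand-in types (not the SDK's netmap.NodeInfo):

    type nodeInfo struct{ status string }

    func (ni *nodeInfo) setStatus(s string) { ni.status = s }

    // deleted side: the state arrives as a value
    func bootstrapWithValue(ni nodeInfo, state string) nodeInfo {
    	ni.setStatus(state)
    	return ni
    }

    // added side: the caller supplies the mutation, which may touch more than one field
    func bootstrapWithSetter(ni nodeInfo, set func(*nodeInfo)) nodeInfo {
    	set(&ni)
    	return ni
    }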
@@ -1,27 +0,0 @@
-package containerconfig
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-
-const (
-	subsection           = "container"
-	listStreamSubsection = "list_stream"
-
-	// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
-	ContainerBatchSizeDefault = 1000
-)
-
-// ContainerBatchSize returns the value of "batch_size" config parameter
-// from "list_stream" subsection of "container" section.
-//
-// Returns ContainerBatchSizeDefault if the value is missing or if
-// the value is not positive integer.
-func ContainerBatchSize(c *config.Config) uint32 {
-	if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
-		return ContainerBatchSizeDefault
-	}
-	size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
-	if size == 0 {
-		return ContainerBatchSizeDefault
-	}
-	return size
-}
@@ -1,27 +0,0 @@
-package containerconfig_test
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestContainerSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		empty := configtest.EmptyConfig()
-		require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
-	})
-
-	const path = "../../../../config/example/node"
-	fileConfigTest := func(c *config.Config) {
-		require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
@@ -5,7 +5,6 @@ import (
 	"context"
 	"net"
 
-	containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
 	morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"

@@ -48,7 +47,6 @@ func initContainerService(_ context.Context, c *cfg) {
 	}
 
 	c.shared.frostfsidClient = frostfsIDSubjectProvider
-	c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
 
 	defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
 		c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),

@@ -58,9 +56,7 @@ func initContainerService(_ context.Context, c *cfg) {
 		&c.key.PrivateKey,
 		containerService.NewAPEServer(defaultChainRouter, cnrRdr,
 			newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
-			containerService.NewSplitterService(
-				c.cfgContainer.containerBatchSize, c.respSvc,
-				containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
+			containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
 		),
 	)
 	service = containerService.NewAuditService(service, c.log, c.audit)

@@ -222,7 +218,6 @@ type morphContainerReader struct {
 
 	lister interface {
 		ContainersOf(*user.ID) ([]cid.ID, error)
-		IterateContainersOf(*user.ID, func(cid.ID) error) error
 	}
 }

@@ -238,10 +233,6 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
 	return x.lister.ContainersOf(id)
 }
 
-func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error {
-	return x.lister.IterateContainersOf(id, processCID)
-}
-
 type morphContainerWriter struct {
 	neoClient *cntClient.Client
 }
@@ -134,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
 	err := stopper(ctx)
 	if err != nil {
 		c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	}
 
@@ -96,7 +96,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
 	if err != nil {
 		c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
 			zap.Any("endpoints", addresses),
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 
 		fatalOnErr(err)

@@ -168,7 +168,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 	}
 
 	subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 		_, _, err := makeNotaryDeposit(ctx, c)
 		if err != nil {
 			c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
-				zap.Error(err),
+				zap.String("error", err.Error()),
 			)
 		}
 	})
@@ -58,7 +58,7 @@ func (c *cfg) MaxObjectSize() uint64 {
 	sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
 	if err != nil {
 		c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	}

@@ -269,7 +269,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 			_, err := ls.Inhume(ctx, inhumePrm)
 			if err != nil {
 				c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
-					zap.Error(err),
+					zap.String("error", err.Error()),
 				)
 			}
 		}),
@@ -113,7 +113,7 @@ func initTreeService(c *cfg) {
 			// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
 			c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
 				zap.Stringer("cid", ev.ID),
-				zap.Error(err))
+				zap.String("error", err.Error()))
 		}
 	})
@@ -83,9 +83,6 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
 FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
 FROSTFS_REPLICATOR_POOL_SIZE=10
 
-# Container service section
-FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
-
 # Object service section
 FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
 FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
@@ -124,11 +124,6 @@
     "pool_size": 10,
     "put_timeout": "15s"
   },
-  "container": {
-    "list_stream": {
-      "batch_size": "500"
-    }
-  },
   "object": {
     "delete": {
       "tombstone_lifetime": 10
@@ -108,10 +108,6 @@ replicator:
   put_timeout: 15s # timeout for the Replicator PUT remote operation
   pool_size: 10 # maximum amount of concurrent replications
 
-container:
-  list_stream:
-    batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once
-
 object:
   delete:
     tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
go.mod (2 changed lines)
@@ -8,7 +8,7 @@ require (
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
 	git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
 	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1
 	git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
 	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
go.sum (BIN): binary file not shown.
@@ -12,9 +12,8 @@ type ApplicationInfo struct {
 func NewApplicationInfo(version string) *ApplicationInfo {
 	appInfo := &ApplicationInfo{
 		versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
-			Namespace: namespace,
-			Name:      "app_info",
-			Help:      "General information about the application.",
+			Name: "app_info",
+			Help: "General information about the application.",
 		}, []string{"version"}),
 	}
 	appInfo.versionValue.With(prometheus.Labels{"version": version})
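Note: dropping Namespace changes the exposed metric name, since Prometheus joins Namespace, Subsystem and Name with underscores into the fully qualified name. A sketch with the plain client library (the "frostfs" value is a placeholder for the package's namespace constant):

    withNS := prometheus.NewGaugeVec(prometheus.GaugeOpts{
    	Namespace: "frostfs", // exposed as "frostfs_app_info"
    	Name:      "app_info",
    	Help:      "General information about the application.",
    }, []string{"version"})

    withoutNS := prometheus.NewGaugeVec(prometheus.GaugeOpts{
    	Name: "app_info", // exposed as "app_info"
    	Help: "General information about the application.",
    }, []string{"version"})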
@@ -67,7 +67,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
 	if err != nil {
 		// do not throw error, try best case matching
 		c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
-			zap.Error(err))
+			zap.String("error", err.Error()))
 	} else if isInnerRingNode {
 		return &ClassifyResult{
 			Role: acl.RoleInnerRing,

@@ -84,7 +84,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
 		// is not possible for previous epoch, so
 		// do not throw error, try best case matching
 		c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
-			zap.Error(err))
+			zap.String("error", err.Error()))
 	} else if isContainerNode {
 		return &ClassifyResult{
 			Role: acl.RoleContainer,
@@ -100,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
 	fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
 	if err != nil {
 		fromMainChainBlock = 0
-		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
+		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
 	}
 	mainnetChain.from = fromMainChainBlock
 

@@ -456,7 +456,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 	fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 	}
 
 	morphChain := &chainParams{
@@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 	if err != nil {
 		// we don't stop inner ring execution on this error
 		s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
-			zap.Error(err))
+			zap.String("error", err.Error()))
 	}
 
 	s.tickInitialExpoch(ctx)

@@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) {
 	for _, c := range s.closers {
 		if err := c(); err != nil {
 			s.log.Warn(ctx, logs.InnerringCloserError,
-				zap.Error(err),
+				zap.String("error", err.Error()),
 			)
 		}
 	}
@@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
 	// there is no signature collecting, so we don't need extra fee
 	_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
 	if err != nil {
-		ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
+		ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
 
 		return false
 	}

@@ -47,7 +47,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
 	networkMap, err := ap.netmapClient.NetMap()
 	if err != nil {
 		ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
-			zap.Error(err))
+			zap.String("error", err.Error()))
 
 		return false
 	}

@@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
 		key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
 		if err != nil {
 			ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
-				zap.Error(err))
+				zap.String("error", err.Error()))
 
 			continue
 		}

@@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
 			ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
 				zap.String("receiver", key.Address()),
 				zap.Int64("amount", int64(gasPerNode)),
-				zap.Error(err),
+				zap.String("error", err.Error()),
 			)
 		}
 	}

@@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
 			ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
 				zap.Strings("receivers", receiversLog),
 				zap.Int64("amount", int64(gasPerNode)),
-				zap.Error(err),
+				zap.String("error", err.Error()),
 			)
 		}
 	}
@@ -50,7 +50,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
 	err := cp.checkPutContainer(pctx)
 	if err != nil {
 		cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 
 		return false

@@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
 
 	if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
 		cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 		return false
 	}

@@ -113,7 +113,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
 	err := cp.checkDeleteContainer(e)
 	if err != nil {
 		cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 
 		return false

@@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
 
 	if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
 		cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 
 		return false
@@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
 	err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
 	if err != nil {
 		np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
-			zap.Error(err))
+			zap.String("error", err.Error()))
 
 		return false
 	}
@ -28,21 +28,21 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
|
||||||
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
|
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
|
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
|
||||||
zap.Error(err))
|
zap.String("error", err.Error()))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
sidechainAlphabet, err := gp.morphClient.Committee()
|
sidechainAlphabet, err := gp.morphClient.Committee()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
|
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
|
||||||
zap.Error(err))
|
zap.String("error", err.Error()))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
|
     newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
     if err != nil {
         gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
-            zap.Error(err))
+            zap.String("error", err.Error()))
         return false
     }

@@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
     err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
     if err != nil {
         gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
-            zap.Error(err))
+            zap.String("error", err.Error()))
     }

     // 2. Update NeoFSAlphabet role in the sidechain.

@@ -98,14 +98,14 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
     innerRing, err := gp.irFetcher.InnerRingKeys()
     if err != nil {
         gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
-            zap.Error(err))
+            zap.String("error", err.Error()))
         return
     }

     newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
     if err != nil {
         gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
-            zap.Error(err))
+            zap.String("error", err.Error()))
         return
     }

@@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid

     if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
         gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
-            zap.Error(err))
+            zap.String("error", err.Error()))
     }
 }

@@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe
     err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
     if err != nil {
         gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
-            zap.Error(err))
+            zap.String("error", err.Error()))
     }
 }

@@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
     err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
     if err != nil {
         gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
-            zap.Error(err))
+            zap.String("error", err.Error()))
     }
 }
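Note: nearly every hunk in this group makes the same mechanical swap, replacing zap's typed error field with an explicit string field. The sketch below is not part of the diff; it only illustrates the two spellings. Both end up under an "error" key in the log output, but `zap.Error(err)` is nil-safe and lets the encoder attach extended error details, whereas calling `err.Error()` on a nil error panics.

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewProduction()
	defer log.Sync()

	err := errors.New("connection refused")

	// Typed field: zap renders it under the "error" key and skips it
	// entirely when err is nil.
	log.Error("fetch failed", zap.Error(err))

	// Plain string field: the key is spelled out by hand, and err must
	// be non-nil or err.Error() panics.
	log.Error("fetch failed", zap.String("error", err.Error()))
}
```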
@@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
     })
     if err != nil {
         np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
-            zap.Error(err))
+            zap.String("error", err.Error()))
         return false
     }
@@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
     epochDuration, err := np.netmapClient.EpochDuration()
     if err != nil {
         np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
-            zap.Error(err))
+            zap.String("error", err.Error()))
     } else {
         np.epochState.SetEpochDuration(epochDuration)
     }

@@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
     if err != nil {
         np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
             zap.String("hash", ev.TxHash().StringLE()),
-            zap.Error(err))
+            zap.String("error", err.Error()))
     }

     if err := np.epochTimer.ResetEpochTimer(h); err != nil {
         np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
-            zap.Error(err))
+            zap.String("error", err.Error()))
     }

     // get new netmap snapshot
     networkMap, err := np.netmapClient.NetMap()
     if err != nil {
         np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
-            zap.Error(err))
+            zap.String("error", err.Error()))

         return false
     }
@@ -42,7 +42,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
     err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
     if err != nil {
         np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
-            zap.Error(err),
+            zap.String("error", err.Error()),
         )

         return false
@@ -62,7 +62,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool {
 func (s *Server) InnerRingIndex(ctx context.Context) int {
     index, err := s.statusIndex.InnerRingIndex()
     if err != nil {
-        s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
+        s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
         return -1
     }

@@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
 func (s *Server) InnerRingSize(ctx context.Context) int {
     size, err := s.statusIndex.InnerRingSize()
     if err != nil {
-        s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
+        s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
         return 0
     }

@@ -86,7 +86,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
 func (s *Server) AlphabetIndex(ctx context.Context) int {
     index, err := s.statusIndex.AlphabetIndex()
     if err != nil {
-        s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
+        s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
         return -1
     }

@@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
             s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
                 zap.Int8("alphabet_index", int8(letter)),
                 zap.Uint64("epoch", epoch),
-                zap.Error(err))
+                zap.String("error", err.Error()))
         }
     })
@@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
         })
     })
     if err != nil {
-        return fmt.Errorf("determine DB size: %w", err)
+        return fmt.Errorf("can't determine DB size: %w", err)
     }
     if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
         b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))

@@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
             return saveItemsCount(tx, items)
         }); err != nil {
             b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
-            return fmt.Errorf("save blobovnicza's size and items count: %w", err)
+            return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
         }
         b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
     }

@@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
             if prm.ignoreErrors {
                 return nil
             }
-            return fmt.Errorf("decode address key: %w", err)
+            return fmt.Errorf("could not decode address key: %w", err)
         }
     }
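Note: the other recurring change in this diff is purely textual, reinstating verb prefixes such as "can't", "could not" and "failed to" in wrapped error messages. With `%w` the wrapping semantics are identical either way; the minimal sketch below (not from the diff) shows that the cause stays reachable regardless of the message text.

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	base := fs.ErrNotExist

	terse := fmt.Errorf("decode address key: %w", base)
	wordy := fmt.Errorf("could not decode address key: %w", base)

	// Wrapping is identical either way: errors.Is still sees the cause.
	fmt.Println(errors.Is(terse, fs.ErrNotExist)) // true
	fmt.Println(errors.Is(wordy, fs.ErrNotExist)) // true
}
```

Because callers matching with `errors.Is` or `errors.As` are unaffected, a sweep like this can touch hundreds of messages without changing behavior.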
@@ -82,7 +82,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
         if !client.IsErrObjectNotFound(err) {
             b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
                 zap.String("level", p),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
             )
         }

@@ -57,7 +57,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
         if !client.IsErrObjectNotFound(err) {
             b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
                 zap.String("level", p),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
         }
     }

@@ -69,7 +69,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
         if !client.IsErrObjectNotFound(err) {
             b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
                 zap.String("level", p),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
             )
         }

@@ -115,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
     // decompress the data
     data, err := b.compression.Decompress(res.Object())
     if err != nil {
-        return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
+        return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
     }

     // unmarshal the object
     obj := objectSDK.New()
     if err := obj.Unmarshal(data); err != nil {
-        return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
+        return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
     }

     return common.GetRes{Object: obj, RawData: data}, nil

@@ -71,7 +71,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
         if !outOfBounds && !client.IsErrObjectNotFound(err) {
             b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
                 zap.String("level", p),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
         }
         if outOfBounds {

@@ -130,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
     // decompress the data
     data, err := b.compression.Decompress(res.Object())
     if err != nil {
-        return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
+        return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
     }

     // unmarshal the object
     obj := objectSDK.New()
     if err := obj.Unmarshal(data); err != nil {
-        return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
+        return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
     }

     from := prm.Range.GetOffset()

@@ -44,12 +44,12 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
             if prm.IgnoreErrors {
                 b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
                     zap.Stringer("address", elem.Address()),
-                    zap.Error(err),
+                    zap.String("err", err.Error()),
                     zap.String("storage_id", p),
                     zap.String("root_path", b.rootPath))
                 return nil
             }
-            return fmt.Errorf("decompress object data: %w", err)
+            return fmt.Errorf("could not decompress object data: %w", err)
         }

         if prm.Handler != nil {

@@ -77,12 +77,12 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
         if err != nil {
             if ignoreErrors {
                 b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
-                    zap.Error(err),
+                    zap.String("err", err.Error()),
                     zap.String("storage_id", p),
                     zap.String("root_path", b.rootPath))
                 return false, nil
             }
-            return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
+            return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
         }
         defer shBlz.Close(ctx)
@@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
     )...)

     if err := blz.Open(ctx); err != nil {
-        return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
+        return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
     }
     if err := blz.Init(ctx); err != nil {
-        return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
+        return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
     }

     b.refCount++

@@ -97,7 +97,7 @@ func (b *sharedDB) Close(ctx context.Context) {
         if err := b.blcza.Close(ctx); err != nil {
             b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
                 zap.String("id", b.path),
-                zap.Error(err),
+                zap.String("error", err.Error()),
             )
         }
         b.blcza = nil

@@ -125,9 +125,9 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
     if err := b.blcza.Close(ctx); err != nil {
         b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
             zap.String("id", b.path),
-            zap.Error(err),
+            zap.String("error", err.Error()),
         )
-        return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
+        return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
     }

     b.refCount = 0
@@ -83,7 +83,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
             i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
         } else {
             i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
         }

@@ -106,7 +106,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
         } else {
             i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
                 zap.String("path", active.SystemPath()),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
         }
         if errors.Is(err, blobovnicza.ErrNoSpace) {
@@ -74,7 +74,7 @@ func (b *BlobStor) Close(ctx context.Context) error {
     for i := range b.storage {
         err := b.storage[i].Storage.Close(ctx)
         if err != nil {
-            b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
+            b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
             if firstErr == nil {
                 firstErr = err
             }

@@ -75,7 +75,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
     for _, err := range errors[:len(errors)-1] {
         b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
             zap.Stringer("address", prm.Address),
-            zap.Error(err),
+            zap.String("error", err.Error()),
             zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
     }

@@ -153,7 +153,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
     if err != nil {
         if prm.IgnoreErrors {
             t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
-                zap.Error(err),
+                zap.String("err", err.Error()),
                 zap.String("directory_path", dirPath))
             return nil
         }

@@ -202,7 +202,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
             if prm.IgnoreErrors {
                 t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
                     zap.Stringer("address", addr),
-                    zap.Error(err),
+                    zap.String("err", err.Error()),
                     zap.String("path", path))
                 continue
             }

@@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
         },
     )
     if err != nil {
-        return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
+        return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
     }

     return count, size, nil

@@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
         },
     )
     if err != nil {
-        return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
+        return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
     }
     success = true
     return result, nil

@@ -45,7 +45,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
                 b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
                     zap.String("storage_path", b.storage[i].Storage.Path()),
                     zap.String("storage_type", b.storage[i].Storage.Type()),
-                    zap.Error(err))
+                    zap.String("err", err.Error()))
                 continue
             }
             return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
@@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
     // Decompress the data.
     var err error
     if data, err = s.compression.Decompress(data); err != nil {
-        return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
+        return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
     }

     // Unmarshal the SDK object.
     obj := objectSDK.New()
     if err := obj.Unmarshal(data); err != nil {
-        return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
+        return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
     }

     return common.GetRes{Object: obj, RawData: data}, nil
@@ -27,7 +27,7 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
         }
     }
     if err != nil {
-        return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
+        return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
     }

     b.mode = m

@@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
         // marshal object
         data, err := prm.Object.Marshal()
         if err != nil {
-            return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
+            return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
         }
         prm.RawData = data
     }
@@ -95,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
     err := eg.Wait()
     close(errCh)
     if err != nil {
-        return fmt.Errorf("initialize shards: %w", err)
+        return fmt.Errorf("failed to initialize shards: %w", err)
     }

     for res := range errCh {

@@ -117,7 +117,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {

             continue
         }
-        return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
+        return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
     }
 }

@@ -167,7 +167,7 @@ func (e *StorageEngine) close(ctx context.Context, releasePools bool) error {
         if err := sh.Close(ctx); err != nil {
             e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
                 zap.String("id", id),
-                zap.Error(err),
+                zap.String("error", err.Error()),
             )
         }
     }

@@ -320,7 +320,7 @@ loop:
     for _, newID := range shardsToAdd {
         sh, err := e.createShard(ctx, rcfg.shards[newID])
         if err != nil {
-            return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
+            return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
         }

         idStr := sh.ID().String()

@@ -331,13 +331,13 @@ loop:
         }
         if err != nil {
             _ = sh.Close(ctx)
-            return fmt.Errorf("init %s shard: %w", idStr, err)
+            return fmt.Errorf("could not init %s shard: %w", idStr, err)
         }

         err = e.addShard(sh)
         if err != nil {
             _ = sh.Close(ctx)
-            return fmt.Errorf("add %s shard: %w", idStr, err)
+            return fmt.Errorf("could not add %s shard: %w", idStr, err)
         }

         e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))

@@ -154,7 +154,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
         if err != nil {
             e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
                 zap.Stringer("addr", addr),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
             return false
         }

@@ -166,7 +166,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
         if err != nil {
             e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
                 zap.Stringer("addr", addr),
-                zap.Error(err),
+                zap.String("err", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
             continue
         }

@@ -196,7 +196,7 @@ func (e *StorageEngine) deleteChunks(
         if err != nil {
             e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
                 zap.Stringer("addr", addr),
-                zap.Error(err),
+                zap.String("err", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
             continue
         }

@@ -140,7 +140,7 @@ func (e *StorageEngine) reportShardError(
     if isLogical(err) {
         e.log.Warn(ctx, msg,
             zap.Stringer("shard_id", sh.ID()),
-            zap.Error(err))
+            zap.String("error", err.Error()))
         return
     }

@@ -151,7 +151,7 @@ func (e *StorageEngine) reportShardError(
     e.log.Warn(ctx, msg, append([]zap.Field{
         zap.Stringer("shard_id", sid),
         zap.Uint32("error count", errCount),
-        zap.Error(err),
+        zap.String("error", err.Error()),
     }, fields...)...)

     if e.errorsThreshold == 0 || errCount < e.errorsThreshold {
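Note: the last context line of the reportShardError hunk hints at how the engine treats its error threshold: a zero value appears to disable degradation, otherwise the shard is presumably degraded once its error counter reaches the threshold. A small runnable sketch of that reading follows; the helper name is hypothetical, not from the diff.

```go
package main

import "fmt"

// shouldDegrade is a hypothetical condensation of the branch visible in
// reportShardError: `errorsThreshold == 0 || errCount < errorsThreshold`
// is the "keep going" case, so its negation triggers degradation.
func shouldDegrade(errCount, errorsThreshold uint32) bool {
	return errorsThreshold != 0 && errCount >= errorsThreshold
}

func main() {
	fmt.Println(shouldDegrade(5, 0))   // false: zero threshold disables the check
	fmt.Println(shouldDegrade(5, 10))  // false: still below the threshold
	fmt.Println(shouldDegrade(10, 10)) // true: threshold reached
}
```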
@@ -578,7 +578,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree

 func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
     if prm.TreeHandler == nil {
-        return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
+        return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
     }

     return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)

@@ -106,7 +106,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
         if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
             e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
                 zap.Stringer("shard_id", it.ShardWithMeta.ID()),
-                zap.Error(it.MetaError),
+                zap.String("error", it.MetaError.Error()),
                 zap.Stringer("address", prm.addr),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
         }

@@ -143,7 +143,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
         } else {
             e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
                 zap.Stringer("shard_id", sh.ID()),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
         }

@@ -165,14 +165,14 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
             errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
             e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
                 zap.Stringer("shard_id", sh.ID()),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
             return
         }
         if client.IsErrObjectAlreadyRemoved(err) {
             e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
                 zap.Stringer("shard_id", sh.ID()),
-                zap.Error(err),
+                zap.String("error", err.Error()),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
             res.status = putToShardRemoved
             res.err = err

@@ -118,7 +118,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
         if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
             e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
                 zap.Stringer("shard_id", it.ShardWithMeta.ID()),
-                zap.Error(it.MetaError),
+                zap.String("error", it.MetaError.Error()),
                 zap.Stringer("address", prm.addr),
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
         }

@@ -108,12 +108,12 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) {
 func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) {
     sh, err := e.createShard(ctx, opts)
     if err != nil {
-        return nil, fmt.Errorf("create a shard: %w", err)
+        return nil, fmt.Errorf("could not create a shard: %w", err)
     }

     err = e.addShard(sh)
     if err != nil {
-        return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
+        return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
     }

     e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())

@@ -124,7 +124,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
 func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) {
     id, err := generateShardID()
     if err != nil {
-        return nil, fmt.Errorf("generate shard ID: %w", err)
+        return nil, fmt.Errorf("could not generate shard ID: %w", err)
     }

     opts = e.appendMetrics(id, opts)

@@ -180,7 +180,7 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {

     pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
     if err != nil {
-        return fmt.Errorf("create pool: %w", err)
+        return fmt.Errorf("could not create pool: %w", err)
     }

     strID := sh.ID().String()

@@ -374,7 +374,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS
                 zap.Error(err),
             )
             multiErrGuard.Lock()
-            multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err))
+            multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err))
             multiErrGuard.Unlock()
         }

@@ -385,7 +385,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS
                 zap.Error(err),
             )
             multiErrGuard.Lock()
-            multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err))
+            multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err))
             multiErrGuard.Unlock()
         }
         return nil
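Note: the closeShards hunks show a common pattern for collecting failures from concurrent shard shutdown: a mutex-guarded `errors.Join` accumulator. A standalone sketch of the pattern follows; the names are illustrative, not taken from the diff.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	var (
		guard    sync.Mutex // protects multiErr across goroutines
		multiErr error
		wg       sync.WaitGroup
	)

	for i := range 3 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			err := fmt.Errorf("could not close removed shard (id:%d)", i)
			guard.Lock()
			multiErr = errors.Join(multiErr, err) // nil-safe accumulation
			guard.Unlock()
		}()
	}
	wg.Wait()

	fmt.Println(multiErr) // all three errors, newline-separated
}
```

`errors.Join` keeps every joined error reachable through `errors.Is`/`errors.As`, which is why the caller can still match individual shard failures after the sweep.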
@@ -54,7 +54,7 @@ func (db *DB) Open(ctx context.Context, m mode.Mode) error {
 func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
     err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
     if err != nil {
-        return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err)
+        return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
     }

     db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))

@@ -73,7 +73,7 @@ func (db *DB) openBolt(ctx context.Context) error {

     db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions)
     if err != nil {
-        return fmt.Errorf("open boltDB database: %w", err)
+        return fmt.Errorf("can't open boltDB database: %w", err)
     }
     db.boltDB.MaxBatchDelay = db.boltBatchDelay
     db.boltDB.MaxBatchSize = db.boltBatchSize

@@ -145,27 +145,27 @@ func (db *DB) init(reset bool) error {
         if reset {
             err := tx.DeleteBucket(name)
             if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
-                return fmt.Errorf("delete static bucket %s: %w", k, err)
+                return fmt.Errorf("could not delete static bucket %s: %w", k, err)
             }
         }

         _, err := tx.CreateBucketIfNotExists(name)
         if err != nil {
-            return fmt.Errorf("create static bucket %s: %w", k, err)
+            return fmt.Errorf("could not create static bucket %s: %w", k, err)
         }
     }

     for _, b := range deprecatedBuckets {
         err := tx.DeleteBucket(b)
         if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
-            return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
+            return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err)
         }
     }

     if !reset { // counters will be recalculated by refill metabase
         err = syncCounter(tx, false)
         if err != nil {
-            return fmt.Errorf("sync object counter: %w", err)
+            return fmt.Errorf("could not sync object counter: %w", err)
         }

         return nil

@@ -238,14 +238,14 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
     }

     if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
-        return fmt.Errorf("increase phy object counter: %w", err)
+        return fmt.Errorf("could not increase phy object counter: %w", err)
     }
     if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
-        return fmt.Errorf("increase logical object counter: %w", err)
+        return fmt.Errorf("could not increase logical object counter: %w", err)
     }
     if isUserObject {
         if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
-            return fmt.Errorf("increase user object counter: %w", err)
+            return fmt.Errorf("could not increase user object counter: %w", err)
         }
     }
     return db.incContainerObjectCounter(tx, cnrID, isUserObject)

@@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject
 func syncCounter(tx *bbolt.Tx, force bool) error {
     shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket)
     if err != nil {
-        return fmt.Errorf("get shard info bucket: %w", err)
+        return fmt.Errorf("could not get shard info bucket: %w", err)
     }
     shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 &&
         len(shardInfoB.Get(objectLogicCounterKey)) == 8 &&

@@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {

     containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName)
     if err != nil {
-        return fmt.Errorf("get container counter bucket: %w", err)
+        return fmt.Errorf("could not get container counter bucket: %w", err)
     }

     var addr oid.Address

@@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
         return nil
     })
     if err != nil {
-        return fmt.Errorf("iterate objects: %w", err)
+        return fmt.Errorf("could not iterate objects: %w", err)
     }

     return setObjectCounters(counters, shardInfoB, containerCounterB)

@@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
         value := containerCounterValue(count)
         err := containerCounterB.Put(key, value)
         if err != nil {
-            return fmt.Errorf("update phy container object counter: %w", err)
+            return fmt.Errorf("could not update phy container object counter: %w", err)
         }
     }
     phyData := make([]byte, 8)

@@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container

     err := shardInfoB.Put(objectPhyCounterKey, phyData)
     if err != nil {
-        return fmt.Errorf("update phy object counter: %w", err)
+        return fmt.Errorf("could not update phy object counter: %w", err)
     }

     logData := make([]byte, 8)

@@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container

     err = shardInfoB.Put(objectLogicCounterKey, logData)
     if err != nil {
-        return fmt.Errorf("update logic object counter: %w", err)
+        return fmt.Errorf("could not update logic object counter: %w", err)
     }

     userData := make([]byte, 8)

@@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container

     err = shardInfoB.Put(objectUserCounterKey, userData)
     if err != nil {
-        return fmt.Errorf("update user object counter: %w", err)
+        return fmt.Errorf("could not update user object counter: %w", err)
     }

     return nil

@@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) {
     }
     var cnrID cid.ID
     if err := cnrID.Decode(buf); err != nil {
-        return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
+        return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err)
     }
     return cnrID, nil
 }

@@ -163,26 +163,26 @@ func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
     if res.phyCount > 0 {
         err := db.updateShardObjectCounter(tx, phy, res.phyCount, false)
         if err != nil {
-            return fmt.Errorf("decrease phy object counter: %w", err)
+            return fmt.Errorf("could not decrease phy object counter: %w", err)
         }
     }

     if res.logicCount > 0 {
         err := db.updateShardObjectCounter(tx, logical, res.logicCount, false)
         if err != nil {
-            return fmt.Errorf("decrease logical object counter: %w", err)
+            return fmt.Errorf("could not decrease logical object counter: %w", err)
         }
     }

     if res.userCount > 0 {
         err := db.updateShardObjectCounter(tx, user, res.userCount, false)
         if err != nil {
-            return fmt.Errorf("decrease user object counter: %w", err)
+            return fmt.Errorf("could not decrease user object counter: %w", err)
         }
     }

     if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
-        return fmt.Errorf("decrease container object counter: %w", err)
+        return fmt.Errorf("could not decrease container object counter: %w", err)
     }
     return nil
 }
@@ -259,7 +259,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
         if garbageBKT != nil {
             err := garbageBKT.Delete(addrKey)
             if err != nil {
-                return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
+                return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
             }
         }
         return deleteSingleResult{}, nil

@@ -280,7 +280,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
         if garbageBKT != nil {
             err := garbageBKT.Delete(addrKey)
             if err != nil {
-                return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
+                return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
             }
         }

@@ -308,7 +308,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
     // remove object
     err = db.deleteObject(tx, obj, false)
     if err != nil {
-        return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
+        return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err)
     }

     if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {

@@ -335,12 +335,12 @@ func (db *DB) deleteObject(

     err = updateListIndexes(tx, obj, delListIndexItem)
     if err != nil {
-        return fmt.Errorf("remove list indexes: %w", err)
+        return fmt.Errorf("can't remove list indexes: %w", err)
     }

     err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
     if err != nil {
-        return fmt.Errorf("remove fake bucket tree indexes: %w", err)
+        return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
     }

     if isParent {

@@ -351,7 +351,7 @@ func (db *DB) deleteObject(
             addrKey := addressKey(object.AddressOf(obj), key)
             err := garbageBKT.Delete(addrKey)
             if err != nil {
-                return fmt.Errorf("remove from garbage bucket: %w", err)
+                return fmt.Errorf("could not remove from garbage bucket: %w", err)
             }
         }
     }

@@ -529,7 +529,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
         addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
         err := garbageBKT.Delete(addrKey)
         if err != nil {
-            return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+            return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
         }
     }

@@ -567,7 +567,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
         addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
         err := garbageBKT.Delete(addrKey)
         if err != nil {
-            return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
+            return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
         }
     }
@@ -229,7 +229,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e

     err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo))
     if err != nil {
-        return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
+        return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
     }

     return splitInfo, nil

@@ -187,7 +187,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD

     err = child.Unmarshal(bytes.Clone(data))
     if err != nil {
-        return nil, fmt.Errorf("unmarshal child with parent: %w", err)
+        return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
     }

     par := child.Parent()
@@ -219,6 +219,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
             meta.WithMaxBatchSize(batchSize),
             meta.WithMaxBatchDelay(10*time.Millisecond),
         )
+        defer func() { require.NoError(b, db.Close(context.Background())) }()
         addrs := make([]oid.Address, 0, numOfObj)

         for range numOfObj {

@@ -233,7 +234,6 @@ func benchmarkGet(b *testing.B, numOfObj int) {
     }

     db, addrs := prepareDb(runtime.NumCPU())
-    defer func() { require.NoError(b, db.Close(context.Background())) }()

     b.Run("parallel", func(b *testing.B) {
         b.ReportAllocs()
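Note: this hunk, and the matching ones in list_test.go further down, appear to move the `db.Close` defer from the benchmark body into the helper that prepares the database. A defer registered inside a helper runs when that helper returns, not when the caller finishes, so the handle comes back already closed. The standalone sketch below (illustrative names, not from the diff) demonstrates the ordering.

```go
package main

import "fmt"

type handle struct{ name string }

func (h *handle) Close() error {
	fmt.Println("closed:", h.name)
	return nil
}

// prepare registers the Close defer inside itself, so the handle is
// already closed by the time the caller receives it.
func prepare() *handle {
	h := &handle{name: "db"}
	defer h.Close() // fires when prepare returns, not when the caller is done
	return h
}

func main() {
	h := prepare()             // prints "closed: db" during the call
	fmt.Println("using", h.name) // any use here happens after Close
}
```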
@@ -177,7 +177,7 @@ type gcHandler struct {
 func (g gcHandler) handleKV(k, _ []byte) error {
     o, err := garbageFromKV(k)
     if err != nil {
-        return fmt.Errorf("parse garbage object: %w", err)
+        return fmt.Errorf("could not parse garbage object: %w", err)
     }

     return g.h(o)

@@ -190,7 +190,7 @@ type graveyardHandler struct {
 func (g graveyardHandler) handleKV(k, v []byte) error {
     o, err := graveFromKV(k, v)
     if err != nil {
-        return fmt.Errorf("parse grave: %w", err)
+        return fmt.Errorf("could not parse grave: %w", err)
     }

     return g.h(o)

@@ -240,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
 func garbageFromKV(k []byte) (res GarbageObject, err error) {
     err = decodeAddressFromKey(&res.addr, k)
     if err != nil {
-        err = fmt.Errorf("parse address: %w", err)
+        err = fmt.Errorf("could not parse address: %w", err)
     }

     return

@@ -373,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
     if data != nil {
         err := targetBucket.Delete(tombKey)
         if err != nil {
-            return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
+            return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
         }
     }
@@ -87,8 +87,7 @@ type CountAliveObjectsInContainerPrm struct {
 }

 // ListWithCursor lists physical objects available in metabase starting from
-// cursor. Includes objects of all types. Does not include inhumed and expired
-// objects.
+// cursor. Includes objects of all types. Does not include inhumed objects.
 // Use cursor value from response for consecutive requests.
 //
 // Returns ErrEndOfListing if there are no more objects to return or count
@@ -144,8 +143,6 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,

 	rawAddr := make([]byte, cidSize, addressKeySize)

-	currEpoch := db.epochState.CurrentEpoch()
-
 loop:
 	for ; name != nil; name, _ = c.Next() {
 		cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name)
@@ -170,7 +167,7 @@ loop:
 		if bkt != nil {
 			copy(rawAddr, cidRaw)
 			result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
-				result, count, cursor, threshold, currEpoch)
+				result, count, cursor, threshold)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -215,7 +212,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
 	limit int, // stop listing at `limit` items in result
 	cursor *Cursor, // start from cursor object
 	threshold bool, // ignore cursor and start immediately
-	currEpoch uint64,
 ) ([]objectcore.Info, []byte, *Cursor, error) {
 	if cursor == nil {
 		cursor = new(Cursor)
@@ -247,19 +243,13 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
 			continue
 		}

-		var o objectSDK.Object
-		if err := o.Unmarshal(bytes.Clone(v)); err != nil {
-			return nil, nil, nil, err
-		}
-
-		expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
-		if !objectLocked(bkt.Tx(), cnt, obj) && hasExpEpoch && expEpoch < currEpoch {
-			continue
-		}
-
 		var isLinkingObj bool
 		var ecInfo *objectcore.ECInfo
 		if objType == objectSDK.TypeRegular {
+			var o objectSDK.Object
+			if err := o.Unmarshal(bytes.Clone(v)); err != nil {
+				return nil, nil, nil, err
+			}
 			isLinkingObj = isLinkObject(&o)
 			ecHeader := o.ECHeader()
 			if ecHeader != nil {
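The cursor contract described in the ListWithCursor comment above is the same on both sides of this change: callers page through the metabase until ErrEndOfListing. A minimal sketch of that loop, assuming the ListPrm/ListRes accessor names implied by the surrounding hunks (SetCount, SetCursor, AddressList, Cursor), not a verified API listing:

    // Page through all objects in the metabase (sketch under assumed names).
    var prm meta.ListPrm
    prm.SetCount(1000) // objects per page

    for {
        res, err := db.ListWithCursor(ctx, prm)
        if errors.Is(err, meta.ErrEndOfListing) {
            break // nothing left to list
        }
        if err != nil {
            return err
        }
        for _, info := range res.AddressList() {
            _ = info // process one objectcore.Info
        }
        prm.SetCursor(res.Cursor()) // feed the returned cursor into the next request
    }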
@@ -3,17 +3,14 @@ package meta_test
 import (
 	"context"
 	"errors"
-	"strconv"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
 	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"github.com/stretchr/testify/require"
 	"go.etcd.io/bbolt"
@@ -21,8 +18,6 @@ import (

 func BenchmarkListWithCursor(b *testing.B) {
 	db := listWithCursorPrepareDB(b)
-	defer func() { require.NoError(b, db.Close(context.Background())) }()
-
 	b.Run("1 item", func(b *testing.B) {
 		benchmarkListWithCursor(b, db, 1)
 	})
@@ -38,6 +33,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
 	db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
 		NoSync: true,
 	})) // faster single-thread generation
+	defer func() { require.NoError(b, db.Close(context.Background())) }()

 	obj := testutil.GenerateObject()
 	for i := range 100_000 { // should be a multiple of all batch sizes
@@ -59,7 +55,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
 	for range b.N {
 		res, err := db.ListWithCursor(context.Background(), prm)
 		if err != nil {
-			if !errors.Is(err, meta.ErrEndOfListing) {
+			if err != meta.ErrEndOfListing {
 				b.Fatalf("error: %v", err)
 			}
 			prm.SetCursor(nil)
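One behavioural detail in the benchmark hunk above: errors.Is matches the sentinel even when it has been wrapped with %w, while the plain != comparison only matches the exact value. A self-contained illustration of that standard-library behaviour:

    // errors.Is unwraps the chain; direct comparison does not.
    sentinel := errors.New("end of listing")
    wrapped := fmt.Errorf("list shard: %w", sentinel)

    fmt.Println(wrapped == sentinel)          // false
    fmt.Println(errors.Is(wrapped, sentinel)) // true

The two forms are therefore interchangeable only while ListWithCursor returns the sentinel unwrapped.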
@@ -74,16 +70,14 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
 func TestLisObjectsWithCursor(t *testing.T) {
 	t.Parallel()

-	const (
-		currEpoch  = 100
-		expEpoch   = currEpoch - 1
-		containers = 5
-		total      = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
-	)
-
-	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	db := newDB(t)
 	defer func() { require.NoError(t, db.Close(context.Background())) }()

+	const (
+		containers = 5
+		total      = containers * 4 // regular + ts + child + lock
+	)
+
 	expected := make([]object.Info, 0, total)

 	// fill metabase with objects
@@ -132,26 +126,6 @@ func TestLisObjectsWithCursor(t *testing.T) {
 		err = putBig(db, child)
 		require.NoError(t, err)
 		expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
-
-		// add expired object (do not include into expected)
-		obj = testutil.GenerateObjectWithCID(containerID)
-		testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
-		require.NoError(t, metaPut(db, obj, nil))
-
-		// add non-expired object (include into expected)
-		obj = testutil.GenerateObjectWithCID(containerID)
-		testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
-		require.NoError(t, metaPut(db, obj, nil))
-		expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
-
-		// add locked expired object (include into expected)
-		obj = testutil.GenerateObjectWithCID(containerID)
-		objID := oidtest.ID()
-		obj.SetID(objID)
-		testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
-		require.NoError(t, metaPut(db, obj, nil))
-		require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
-		expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
 	}

 	t.Run("success with various count", func(t *testing.T) {
@@ -19,7 +19,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {

 	if !db.mode.NoMetabase() {
 		if err := db.Close(ctx); err != nil {
-			return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+			return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
 		}
 	}
@@ -31,7 +31,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
 			err = db.Init(ctx)
 		}
 		if err != nil {
-			return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+			return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
 		}
 	}

@@ -180,18 +180,18 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o

 	err := putUniqueIndexes(tx, obj, si, id)
 	if err != nil {
-		return fmt.Errorf("put unique indexes: %w", err)
+		return fmt.Errorf("can't put unique indexes: %w", err)
 	}

 	err = updateListIndexes(tx, obj, putListIndexItem)
 	if err != nil {
-		return fmt.Errorf("put list indexes: %w", err)
+		return fmt.Errorf("can't put list indexes: %w", err)
 	}

 	if indexAttributes {
 		err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
 		if err != nil {
-			return fmt.Errorf("put fake bucket tree indexes: %w", err)
+			return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
 		}
 	}

@@ -250,7 +250,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad
 	}
 	rawObject, err := obj.CutPayload().Marshal()
 	if err != nil {
-		return fmt.Errorf("marshal object header: %w", err)
+		return fmt.Errorf("can't marshal object header: %w", err)
 	}
 	return putUniqueIndexItem(tx, namedBucketItem{
 		name: bucketName,
@@ -475,7 +475,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck
 func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
 	bkt, err := createBucketLikelyExists(tx, item.name)
 	if err != nil {
-		return fmt.Errorf("create index %v: %w", item.name, err)
+		return fmt.Errorf("can't create index %v: %w", item.name, err)
 	}

 	data, err := update(bkt.Get(item.key), item.val)
@@ -492,12 +492,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 	bkt, err := createBucketLikelyExists(tx, item.name)
 	if err != nil {
-		return fmt.Errorf("create index %v: %w", item.name, err)
+		return fmt.Errorf("can't create index %v: %w", item.name, err)
 	}

 	fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
 	if err != nil {
-		return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
+		return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
 	}

 	return fkbtRoot.Put(item.val, zeroValue)
@@ -506,19 +506,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 	bkt, err := createBucketLikelyExists(tx, item.name)
 	if err != nil {
-		return fmt.Errorf("create index %v: %w", item.name, err)
+		return fmt.Errorf("can't create index %v: %w", item.name, err)
 	}

 	lst, err := decodeList(bkt.Get(item.key))
 	if err != nil {
-		return fmt.Errorf("decode leaf list %v: %w", item.key, err)
+		return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
 	}

 	lst = append(lst, item.val)

 	encodedLst, err := encodeList(lst)
 	if err != nil {
-		return fmt.Errorf("encode leaf list %v: %w", item.key, err)
+		return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
 	}

 	return bkt.Put(item.key, encodedLst)
@@ -565,7 +565,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
 		case v2object.FilterHeaderContainerID: // support deprecated field
 			err := res.cnr.DecodeString(filters[i].Value())
 			if err != nil {
-				return filterGroup{}, fmt.Errorf("parse container id: %w", err)
+				return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
 			}

 			res.withCnrFilter = true
@@ -32,13 +32,13 @@ func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error
 	}

 	if err := db.openDB(ctx, mode); err != nil {
-		return nil, fmt.Errorf("open metabase: %w", err)
+		return nil, fmt.Errorf("failed to open metabase: %w", err)
 	}

 	id, err := db.readShardID()

 	if cErr := db.close(); cErr != nil {
-		err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+		err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
 	}

 	return id, metaerr.Wrap(err)
@@ -70,7 +70,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
 	}

 	if err := db.openDB(ctx, mode); err != nil {
-		return fmt.Errorf("open metabase: %w", err)
+		return fmt.Errorf("failed to open metabase: %w", err)
 	}

 	err := db.writeShardID(id)
@@ -79,7 +79,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
 	}

 	if cErr := db.close(); cErr != nil {
-		err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
+		err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
 	}

 	return metaerr.Wrap(err)
@@ -95,7 +95,7 @@ func compactDB(db *bbolt.DB) error {
 		NoSync: true,
 	})
 	if err != nil {
-		return fmt.Errorf("open new metabase to compact: %w", err)
+		return fmt.Errorf("can't open new metabase to compact: %w", err)
 	}
 	if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
 		return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
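For context on the compaction step above: bbolt has no in-place compaction, so the usual pattern is to copy into a fresh file and swap it in. A rough sketch of that flow under assumed names (tmpFileName and the final rename are this sketch's assumptions, not shown in the hunk):

    // Copy db into a new file, then flush it manually because NoSync is set.
    dst, err := bbolt.Open(tmpFileName, 0o640, &bbolt.Options{NoSync: true})
    if err != nil {
        return err
    }
    if err := bbolt.Compact(dst, db, 1<<20); err != nil { // 1 MiB per tx
        return errors.Join(err, dst.Close(), os.Remove(tmpFileName))
    }
    if err := dst.Sync(); err != nil { // NoSync skipped fsync during the copy
        return err
    }
    // ... close both handles and rename tmpFileName over the original path.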
@@ -292,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
 	}
 	expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
 	if err != nil {
-		return fmt.Errorf("parse expiration epoch: %w", err)
+		return fmt.Errorf("could not parse expiration epoch: %w", err)
 	}
 	expirationEpochBucket := b.Bucket(attrValue)
 	attrKeyValueC := expirationEpochBucket.Cursor()
@@ -399,7 +399,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
 	for _, key := range keys {
 		attr, ok := attributeFromAttributeBucket(key)
 		if !ok {
-			return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+			return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
 		}
 		if !IsAtrributeIndexed(attr) {
 			keysToDrop = append(keysToDrop, key)
@@ -407,7 +407,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
 		}
 		contID, ok := cidFromAttributeBucket(key)
 		if !ok {
-			return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+			return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
 		}
 		info, err := cs.Info(contID)
 		if err != nil {
@@ -231,11 +231,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
 	epoch := binary.BigEndian.Uint64(key)
 	var cnr cid.ID
 	if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
-		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
+		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
 	}
 	var obj oid.ID
 	if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
-		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
+		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
 	}
 	return epoch, cnr, obj, nil
 }
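The parser above implies the layout of the expiration-epoch index key: an 8-byte big-endian epoch, then the raw container ID, then the raw object ID. A sketch of building such a key, assuming epochSize is 8 and cidSize is 32 as the slicing suggests, and that cnrBytes/objBytes hold the raw 32-byte IDs:

    // epoch (8 bytes, big-endian) | container ID (32 bytes) | object ID (32 bytes)
    key := make([]byte, 8+32+32)
    binary.BigEndian.PutUint64(key[:8], epoch)
    copy(key[8:40], cnrBytes) // raw container ID bytes (assumed)
    copy(key[40:], objBytes)  // raw object ID bytes (assumed)

Big-endian encoding of the epoch keeps the bucket's keys sorted chronologically under bbolt's byte-wise ordering, which is what makes range scans by epoch cheap.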
@@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {

 	b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
 	if err != nil {
-		return fmt.Errorf("create auxiliary bucket: %w", err)
+		return fmt.Errorf("can't create auxiliary bucket: %w", err)
 	}
 	return b.Put(versionKey, data)
 }
@@ -106,7 +106,7 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
 		}
 	}
 	if err != nil {
-		return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+		return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
 	}

 	t.mode = m
@@ -128,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
 	readOnly := m.ReadOnly()
 	err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
 	if err != nil {
-		return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
+		return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
 	}

 	opts := *bbolt.DefaultOptions
@@ -139,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {

 	t.db, err = bbolt.Open(t.path, t.perm, &opts)
 	if err != nil {
-		return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
+		return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
 	}

 	t.db.MaxBatchSize = t.maxBatchSize
@@ -1360,7 +1360,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
 		return nil
 	})
 	if err != nil {
-		return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
+		return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
 	}
 	success = true
 	return ids, nil
@@ -1504,7 +1504,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*

 		var contID cidSDK.ID
 		if err := contID.Decode(k[:32]); err != nil {
-			return fmt.Errorf("decode container ID: %w", err)
+			return fmt.Errorf("failed to decode containerID: %w", err)
 		}
 		res.Items = append(res.Items, ContainerIDTreeID{
 			CID: contID,
@@ -36,7 +36,7 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {

 	size, err := s.metaBase.ContainerSize(prm.cnr)
 	if err != nil {
-		return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
+		return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
 	}

 	return ContainerSizeRes{
@@ -71,7 +71,7 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont

 	counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
 	if err != nil {
-		return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
+		return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err)
 	}

 	return ContainerCountRes{
@@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err

 	err = s.SetMode(ctx, mode.DegradedReadOnly)
 	if err != nil {
-		return fmt.Errorf("switch to mode %s", mode.Mode(mode.DegradedReadOnly))
+		return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly))
 	}
 	return nil
 }
@@ -72,7 +72,7 @@ func (s *Shard) Open(ctx context.Context) error {
 			for j := i + 1; j < len(components); j++ {
 				if err := components[j].Open(ctx, m); err != nil {
 					// Other components must be opened, fail.
-					return fmt.Errorf("open %T: %w", components[j], err)
+					return fmt.Errorf("could not open %T: %w", components[j], err)
 				}
 			}
 			err = s.handleMetabaseFailure(ctx, "open", err)
@@ -83,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error {
 				break
 			}

-			return fmt.Errorf("open %T: %w", component, err)
+			return fmt.Errorf("could not open %T: %w", component, err)
 		}
 	}
 	return nil
@@ -184,7 +184,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
 				break
 			}

-			return fmt.Errorf("initialize %T: %w", component, err)
+			return fmt.Errorf("could not initialize %T: %w", component, err)
 		}
 	}
 	return nil
@@ -205,7 +205,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error {

 	err := s.metaBase.Reset()
 	if err != nil {
-		return fmt.Errorf("reset metabase: %w", err)
+		return fmt.Errorf("could not reset metabase: %w", err)
 	}

 	withCount := true
@@ -254,12 +254,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {

 	err = errors.Join(egErr, itErr)
 	if err != nil {
-		return fmt.Errorf("put objects to the meta: %w", err)
+		return fmt.Errorf("could not put objects to the meta: %w", err)
 	}

 	err = s.metaBase.SyncCounters()
 	if err != nil {
-		return fmt.Errorf("sync object counters: %w", err)
+		return fmt.Errorf("could not sync object counters: %w", err)
 	}

 	success = true
@@ -272,7 +272,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
 	if err := obj.Unmarshal(data); err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
 			zap.Stringer("address", addr),
-			zap.Error(err))
+			zap.String("err", err.Error()))
 		return nil
 	}

@@ -318,7 +318,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
 func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
 	var lock objectSDK.Lock
 	if err := lock.Unmarshal(obj.Payload()); err != nil {
-		return fmt.Errorf("unmarshal lock content: %w", err)
+		return fmt.Errorf("could not unmarshal lock content: %w", err)
 	}

 	locked := make([]oid.ID, lock.NumberOfMembers())
@@ -328,7 +328,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
 	id, _ := obj.ID()
 	err := s.metaBase.Lock(ctx, cnr, id, locked)
 	if err != nil {
-		return fmt.Errorf("lock objects: %w", err)
+		return fmt.Errorf("could not lock objects: %w", err)
 	}
 	return nil
 }
@@ -337,7 +337,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
 	tombstone := objectSDK.NewTombstone()

 	if err := tombstone.Unmarshal(obj.Payload()); err != nil {
-		return fmt.Errorf("unmarshal tombstone content: %w", err)
+		return fmt.Errorf("could not unmarshal tombstone content: %w", err)
 	}

 	tombAddr := object.AddressOf(obj)
@@ -358,7 +358,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object

 	_, err := s.metaBase.Inhume(ctx, inhumePrm)
 	if err != nil {
-		return fmt.Errorf("inhume objects: %w", err)
+		return fmt.Errorf("could not inhume objects: %w", err)
 	}
 	return nil
 }
@@ -112,7 +112,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
 	if err != nil {
 		s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
 			zap.Stringer("object", addr),
-			zap.Error(err),
+			zap.String("error", err.Error()),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return err
 	}
@@ -132,7 +132,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
 	if err != nil && !client.IsErrObjectNotFound(err) {
 		s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
 			zap.Stringer("object_address", addr),
-			zap.Error(err),
+			zap.String("error", err.Error()),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return err
 	}
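The logging hunks here and in the following file all swap between the same two zap idioms, so one comparison is worth spelling out. zap.Error(err) attaches the error under the conventional "error" key and is a no-op for a nil error; zap.String("error", err.Error()) produces an equivalent field for a non-nil error but calls Error() eagerly, so it panics if err is nil. A short illustration (logger construction elided):

    // Equivalent output for a non-nil err:
    log.Debug("object removal failed", zap.Error(err))
    log.Debug("object removal failed", zap.String("error", err.Error()))

    // Only the first form is nil-safe:
    var nilErr error
    log.Debug("ok", zap.Error(nilErr)) // the field is silently skipped
    // log.Debug("ok", zap.String("error", nilErr.Error())) // would panic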
@@ -205,7 +205,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) {
 		})
 		if err != nil {
 			gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
-				zap.Error(err),
+				zap.String("error", err.Error()),
 			)

 			v.prevGroup.Done()
@@ -313,7 +313,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 	err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)

 		return
@@ -334,7 +334,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {

 	if err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 		result.success = false
 	}
@@ -396,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
 	})

 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
 	}
 }

@@ -429,7 +429,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 	res, err := s.metaBase.Inhume(ctx, inhumePrm)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)

 		return
@@ -584,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
 	})

 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
 	}
 }

@@ -637,7 +637,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
 	res, err := s.metaBase.InhumeTombstones(ctx, tss)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)

 		return
@@ -665,7 +665,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 	unlocked, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)

 		return
@@ -678,7 +678,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 	res, err := s.metaBase.Inhume(ctx, pInhume)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)

 		return
@@ -722,7 +722,7 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
 	_, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)

 		return
@@ -175,7 +175,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta

 	mExRes, err := s.metaBase.StorageID(ctx, mPrm)
 	if err != nil {
-		return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
+		return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
 	}

 	storageID := mExRes.StorageID()
@@ -36,7 +36,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
 	modeDegraded := s.GetMode().NoMetabase()
 	if !modeDegraded {
 		if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
-			err = fmt.Errorf("read shard id from metabase: %w", err)
+			err = fmt.Errorf("failed to read shard id from metabase: %w", err)
 		}
 	}

@@ -64,7 +64,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {

 	if len(idFromMetabase) == 0 && !modeDegraded {
 		if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
-			err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
+			err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr))
 		}
 	}
 	return
@@ -110,7 +110,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
 	}

 	s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
-		zap.Error(err),
+		zap.String("error", err.Error()),
 		zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
 	)

@@ -109,7 +109,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {

 	lst, err := s.metaBase.Containers(ctx)
 	if err != nil {
-		return res, fmt.Errorf("list stored containers: %w", err)
+		return res, fmt.Errorf("can't list stored containers: %w", err)
 	}

 	filters := objectSDK.NewSearchFilters()
@@ -124,7 +124,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
 		if err != nil {
 			s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
 				zap.Stringer("cid", lst[i]),
-				zap.Error(err),
+				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

 			continue
@@ -149,7 +149,7 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo

 	containers, err := s.metaBase.Containers(ctx)
 	if err != nil {
-		return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
+		return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
 	}

 	return ListContainersRes{
@@ -180,7 +180,7 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
 	metaPrm.SetCursor(prm.cursor)
 	res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
 	if err != nil {
-		return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
+		return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
 	}

 	return ListWithCursorRes{
@@ -208,7 +208,7 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai
 	metaPrm.Handler = prm.Handler
 	err := s.metaBase.IterateOverContainers(ctx, metaPrm)
 	if err != nil {
-		return fmt.Errorf("iterate over containers: %w", err)
+		return fmt.Errorf("could not iterate over containers: %w", err)
 	}

 	return nil
@@ -235,7 +235,7 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
 	metaPrm.Handler = prm.Handler
 	err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
 	if err != nil {
-		return fmt.Errorf("iterate over objects: %w", err)
+		return fmt.Errorf("could not iterate over objects: %w", err)
 	}

 	return nil
@@ -258,7 +258,7 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive
 	metaPrm.ContainerID = prm.ContainerID
 	count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
 	if err != nil {
-		return 0, fmt.Errorf("count alive objects in bucket: %w", err)
+		return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
 	}

 	return count, nil
@@ -76,12 +76,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 	if err != nil || !tryCache {
 		if err != nil {
 			s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
-				zap.Error(err))
+				zap.String("err", err.Error()))
 		}

 		res, err = s.blobStor.Put(ctx, putPrm)
 		if err != nil {
-			return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
+			return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
 		}
 	}

@@ -94,7 +94,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 	if err != nil {
 		// may we need to handle this case in a special way
 		// since the object has been successfully written to BlobStor
-		return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
+		return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
 	}

 	if res.Inserted {
@@ -67,7 +67,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {

 	mRes, err := s.metaBase.Select(ctx, selectPrm)
 	if err != nil {
-		return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
+		return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
 	}

 	return SelectRes{
@@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
 	return b.ForEach(func(k, _ []byte) error {
 		err := addr.DecodeString(string(k))
 		if err != nil {
-			return fmt.Errorf("parse object address: %w", err)
+			return fmt.Errorf("could not parse object address: %w", err)
 		}

 		return f(addr)
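IterateDB above walks every key of a write-cache bucket, decodes each key as an object address, and hands it to the callback; returning an error from the callback aborts the iteration and propagates out. A usage sketch (the writecache package alias is this sketch's assumption):

    // Count the entries stored in a write-cache database.
    var count int
    err := writecache.IterateDB(db, func(addr oid.Address) error {
        count++
        return nil
    })
    if err != nil {
        return err
    }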
@@ -83,7 +83,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
 	}
 	if !shrink {
 		if err := c.fsTree.Close(ctx); err != nil {
-			return fmt.Errorf("close write-cache storage: %w", err)
+			return fmt.Errorf("can't close write-cache storage: %w", err)
 		}
 		return nil
 	}
@@ -98,16 +98,16 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
 		if errors.Is(err, errIterationCompleted) {
 			empty = false
 		} else {
-			return fmt.Errorf("check write-cache items: %w", err)
+			return fmt.Errorf("failed to check write-cache items: %w", err)
 		}
 	}
 	if err := c.fsTree.Close(ctx); err != nil {
-		return fmt.Errorf("close write-cache storage: %w", err)
+		return fmt.Errorf("can't close write-cache storage: %w", err)
 	}
 	if empty {
 		err := os.RemoveAll(c.path)
 		if err != nil && !os.IsNotExist(err) {
-			return fmt.Errorf("remove write-cache files: %w", err)
+			return fmt.Errorf("failed to remove write-cache files: %w", err)
 		}
 	} else {
 		c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
@@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
 		fstree.WithFileCounter(c.counter),
 	)
 	if err := c.fsTree.Open(mod); err != nil {
-		return fmt.Errorf("open FSTree: %w", err)
+		return fmt.Errorf("could not open FSTree: %w", err)
 	}
 	if err := c.fsTree.Init(); err != nil {
-		return fmt.Errorf("init FSTree: %w", err)
+		return fmt.Errorf("could not init FSTree: %w", err)
 	}

 	return nil
@@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
 		return nil
 	}
 	if err != nil {
-		return fmt.Errorf("check write-cache database existence: %w", err)
+		return fmt.Errorf("could not check write-cache database existence: %w", err)
 	}
 	db, err := OpenDB(c.path, true, os.OpenFile)
 	if err != nil {
-		return fmt.Errorf("open write-cache database: %w", err)
+		return fmt.Errorf("could not open write-cache database: %w", err)
 	}
 	defer func() {
 		_ = db.Close()
@@ -29,7 +29,7 @@ func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {

 	amount, err := client.BigIntFromStackItem(prms[0])
 	if err != nil {
-		return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
+		return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
 	}
 	return amount, nil
 }
@@ -21,7 +21,7 @@ func (c *Client) Decimals() (uint32, error) {

 	decimals, err := client.IntFromStackItem(prms[0])
 	if err != nil {
-		return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
+		return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
 	}
 	return uint32(decimals), nil
 }
@@ -39,7 +39,7 @@ func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {

 	_, err = c.client.Invoke(ctx, prm)
 	if err != nil {
-		return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
+		return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
 	}
 	return nil
 }
@@ -196,7 +196,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F

 	txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
 	if err != nil {
-		return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
+		return InvokeRes{}, fmt.Errorf("could not invoke %s: %w", method, err)
 	}

 	c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
@@ -210,7 +210,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F

 // TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
 // If cb returns an error, the session is closed and this error is returned as-is.
-// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
+// If the remove neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
 // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
 // The default batchSize is 100, the default limit from neo-go.
 func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error {
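The TestInvokeIterator signature shown above takes the per-item callback, a prefetch batch size, the contract hash, the method name, and the method's arguments. A caller-side sketch, with contractHash and the method name as placeholders rather than real identifiers:

    // Collect every stack item produced by an iterator-returning method.
    err := c.TestInvokeIterator(func(item stackitem.Item) error {
        // inspect or convert the item here
        return nil
    }, 100, contractHash, "someIteratorMethod")
    if errors.Is(err, unwrap.ErrNoSessionID) {
        // the remote node has session support disabled;
        // fall back to a non-iterator call, as ContainersOf does below
    }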
@@ -390,7 +390,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 		height, err = c.rpcActor.GetBlockCount()
 		if err != nil {
 			c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
-				zap.Error(err))
+				zap.String("error", err.Error()))
 			return nil
 		}

@@ -404,7 +404,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 		newHeight, err = c.rpcActor.GetBlockCount()
 		if err != nil {
 			c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
-				zap.Error(err))
+				zap.String("error", err.Error()))
 			return nil
 		}

@@ -509,7 +509,7 @@ func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {

 	list, err := c.roleList(noderoles.NeoFSAlphabet)
 	if err != nil {
-		return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
+		return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
 	}

 	return list, nil
@@ -523,7 +523,7 @@ func (c *Client) GetDesignateHash() util.Uint160 {
 func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
 	height, err := c.rpcActor.GetBlockCount()
 	if err != nil {
-		return nil, fmt.Errorf("get chain height: %w", err)
+		return nil, fmt.Errorf("can't get chain height: %w", err)
 	}

 	return c.rolemgmt.GetDesignatedByRole(r, height)
@@ -2,7 +2,9 @@ package container

 import (
 	"errors"
+	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -14,36 +16,27 @@ import (
 //
 // If remote RPC does not support neo-go session API, fallback to List() method.
 func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
-	var cidList []cid.ID
-	var err error
-
-	cb := func(id cid.ID) error {
-		cidList = append(cidList, id)
-		return nil
-	}
-	if err = c.IterateContainersOf(idUser, cb); err != nil {
-		return nil, err
-	}
-	return cidList, nil
-}
-
-// iterateContainers iterates over a list of container identifiers
-// belonging to the specified user of FrostFS system and executes
-// `cb` on each element. If idUser is nil, calls it on the list of all containers.
-func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error) error {
 	var rawID []byte

 	if idUser != nil {
 		rawID = idUser.WalletBytes()
 	}

-	itemCb := func(item stackitem.Item) error {
-		id, err := getCIDfromStackItem(item)
+	var cidList []cid.ID
+	cb := func(item stackitem.Item) error {
+		rawID, err := client.BytesFromStackItem(item)
 		if err != nil {
-			return err
+			return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err)
 		}
-		if err = cb(id); err != nil {
-			return err
+
+		var id cid.ID
+
+		err = id.Decode(rawID)
+		if err != nil {
+			return fmt.Errorf("decode container ID: %w", err)
 		}
+
+		cidList = append(cidList, id)
 		return nil
 	}

@@ -57,10 +50,13 @@ func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error
 	const batchSize = 512

 	cnrHash := c.client.ContractAddress()
-	err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
-	if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
-		return c.iterate(idUser, cb)
+	err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID)
+	if err != nil {
+		if errors.Is(err, unwrap.ErrNoSessionID) {
+			return c.list(idUser)
+		}
+		return nil, err
 	}

-	return err
+	return cidList, nil
 }
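On the right-hand side of this change, ContainersOf decodes the container IDs inline and falls back to the list-based contract call when the node offers no session support, so caller-side usage stays the same. A minimal sketch, with cnrClient as an assumed variable name for this container contract client:

    // List the containers of one owner; passing nil asks for all containers.
    var owner user.ID // assumed to be derived from a key or wallet
    ids, err := cnrClient.ContainersOf(&owner)
    if err != nil {
        return err
    }
    for _, id := range ids {
        fmt.Println(id.EncodeToString())
    }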
@@ -78,7 +78,7 @@ func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {

 	res, err := c.client.Invoke(ctx, prm)
 	if err != nil {
-		return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
+		return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
 	}
 	return res.VUB, nil
 }
Some files were not shown because too many files have changed in this diff.