Compare commits

..

1 commit

Commit 09c66fc93c
WIP cli: Allow to upload object parts in parallel
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
Date: 2024-11-29 15:46:46 +03:00
272 changed files with 3069 additions and 2819 deletions

View file

@@ -1,28 +0,0 @@
name: OCI image
on:
push:
workflow_dispatch:
jobs:
image:
name: Build container images
runs-on: docker
container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
steps:
- name: Clone git repo
uses: actions/checkout@v3
- name: Build OCI image
run: make images
- name: Push image to OCI registry
run: |
echo "$REGISTRY_PASSWORD" \
| docker login --username truecloudlab --password-stdin git.frostfs.info
make push-images
if: >-
startsWith(github.ref, 'refs/tags/v') &&
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
env:
REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

View file

@@ -18,7 +18,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.23.5'
go-version: '1.23'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@@ -89,7 +89,5 @@ linters:
- protogetter
- intrange
- tenv
- unconvert
- unparam
disable-all: true
fast: false

View file

@@ -1,3 +0,0 @@
.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
.forgejo/.* @potyarkin
Makefile @potyarkin

View file

@@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.62.2
LINT_VERSION ?= 1.62.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
@@ -139,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
# Push FrostFS components' docker image to the registry
push-image-%:
@echo "⇒ Publish FrostFS $* docker image "
@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
# Push all Docker images to the registry
.PHONY: push-images
push-images: push-image-storage push-image-ir push-image-cli push-image-adm
# Run `make %` in Golang container
docker/%:
docker run --rm -t \
@@ -279,12 +270,10 @@ env-up: all
echo "Frostfs contracts not found"; exit 1; \
fi
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
--storage-wallet ./dev/storage/wallet01.json \
--storage-wallet ./dev/storage/wallet02.json \
--storage-wallet ./dev/storage/wallet03.json \
--storage-wallet ./dev/storage/wallet04.json
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
make locode-download; \
fi

View file

@@ -28,7 +28,6 @@ const (
var (
errNoPathsFound = errors.New("no metabase paths found")
errNoMorphEndpointsFound = errors.New("no morph endpoints found")
errUpgradeFailed = errors.New("upgrade failed")
)
var UpgradeCmd = &cobra.Command{
@@ -92,19 +91,14 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil {
return err
}
allSuccess := true
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
allSuccess = false
}
}
if allSuccess {
return nil
}
return errUpgradeFailed
return nil
}
func getMetabasePaths(appCfg *config.Config) ([]string, error) {
@@ -141,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
if err != nil {
return nil, fmt.Errorf("resolve container contract hash: %w", err)
}
cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
if err != nil {
return nil, fmt.Errorf("create morph container client: %w", err)
}

View file

@@ -253,7 +253,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaces()
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
namespaces, err := frostfsidclient.ParseNamespaces(items)
@@ -305,7 +305,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
@@ -319,7 +319,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
@@ -365,7 +365,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
groups, err := frostfsidclient.ParseGroups(items)
commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@@ -415,7 +415,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@@ -492,17 +492,17 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
}
func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
var shouldStop bool
res := make([]stackitem.Item, 0)
for !shouldStop {
items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
items, err := inv.TraverseIterator(sessionID, iter, batchSize)
if err != nil {
return nil, err
}
res = append(res, items...)
shouldStop = len(items) < iteratorBatchSize
shouldStop = len(items) < batchSize
}
return res, nil

View file

@@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
@@ -140,29 +141,60 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
}
func generateStorageCreds(cmd *cobra.Command, _ []string) error {
walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
w, err := wallet.NewWallet(walletPath)
if err != nil {
return fmt.Errorf("create wallet: %w", err)
}
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
password, err := config.GetStoragePassword(viper.GetViper(), label)
if err != nil {
return fmt.Errorf("can't fetch password: %w", err)
}
if label == "" {
label = constants.SingleAccountName
}
if err := w.CreateAccount(label, password); err != nil {
return fmt.Errorf("can't create account: %w", err)
}
return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
return refillGas(cmd, storageGasConfigFlag, true)
}
func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
// storage wallet path is not part of the config
storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
// wallet address is not part of the config
walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
var gasReceiver util.Uint160
if len(walletAddress) != 0 {
gasReceiver, err = address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
} else {
if storageWalletPath == "" {
return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
}
var w *wallet.Wallet
if createWallet {
w, err = wallet.NewWallet(storageWalletPath)
} else {
w, err = wallet.NewWalletFromFile(storageWalletPath)
}
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
if createWallet {
var password string
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
password, err := config.GetStoragePassword(viper.GetViper(), label)
if err != nil {
return fmt.Errorf("can't fetch password: %w", err)
}
if label == "" {
label = constants.SingleAccountName
}
if err := w.CreateAccount(label, password); err != nil {
return fmt.Errorf("can't create account: %w", err)
}
}
gasReceiver = w.Accounts[0].Contract.ScriptHash()
}
gasStr := viper.GetString(gasFlag)
gasAmount, err := helper.ParseGASAmount(gasStr)
@@ -176,11 +208,9 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160)
}
bw := io.NewBufBinWriter()
for _, gasReceiver := range gasReceivers {
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
}
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
if bw.Err != nil {
return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
}
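
For context: the multi-receiver loop above relies on NeoVM's ASSERT opcode for atomicity. Each "transfer" AppCall leaves a boolean result on the stack, and ASSERT faults the whole script when that boolean is false, so either every receiver gets GAS or the transaction fails as a unit. A hedged, self-contained sketch of the same script-building pattern (the function name and parameters are illustrative, not part of the repository):

package refill

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)

// buildRefillScript assembles one invocation script holding a GAS
// "transfer" call per receiver, each followed by ASSERT so a single
// failed transfer aborts the entire batch.
func buildRefillScript(committee util.Uint160, receivers []util.Uint160, amount int64) ([]byte, error) {
	bw := io.NewBufBinWriter()
	for _, receiver := range receivers {
		emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
			committee, receiver, amount, nil)
		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
	}
	if bw.Err != nil {
		return nil, fmt.Errorf("invalid transfer arguments: %w", bw.Err)
	}
	return bw.Bytes(), nil
}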

View file

@@ -1,12 +1,7 @@
package generate
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -38,27 +33,7 @@ var (
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
},
RunE: func(cmd *cobra.Command, _ []string) error {
storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
var gasReceivers []util.Uint160
for _, walletAddress := range walletAddresses {
addr, err := address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
gasReceivers = append(gasReceivers, addr)
}
for _, storageWalletPath := range storageWalletPaths {
w, err := wallet.NewWalletFromFile(storageWalletPath)
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
}
return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
},
}
GenerateAlphabetCmd = &cobra.Command{
@@ -75,10 +50,10 @@ var (
func initRefillGasCmd() {
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
}
func initGenerateStorageCmd() {

View file

@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math/big"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
addr, _ := cmd.Flags().GetString(walletAccountFlag)
if addr != "" {
if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@@ -73,9 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return err
}
till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
if till <= 0 {
return errInvalidNotaryDepositLifetime
till := int64(defaultNotaryDepositLifetime)
tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
if err != nil {
return err
}
if tillStr != "" {
till, err = strconv.ParseInt(tillStr, 10, 64)
if err != nil || till <= 0 {
return errInvalidNotaryDepositLifetime
}
}
return transferGas(cmd, acc, accHash, gasAmount, till)

View file

@@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
}
func init() {

View file

@@ -20,32 +20,23 @@ const (
accountAddressFlag = "account"
)
func parseAddresses(cmd *cobra.Command) []util.Uint160 {
var addrs []util.Uint160
accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
for _, acc := range accs {
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
addrs = append(addrs, addr)
}
return addrs
}
func addProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "addAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "addAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func removeProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "removeAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "removeAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
@@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err
}
bw := io.NewBufBinWriter()
for _, addr := range addrs {
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
}
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err

View file

@@ -29,15 +29,13 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}

View file

@@ -11,7 +11,6 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/template"
@@ -106,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
@@ -411,7 +410,8 @@ func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := slices.Clone(rpc)
shuffled := make([]string, len(rpc))
copy(shuffled, rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {

View file

@@ -4,14 +4,17 @@ import (
"bytes"
"cmp"
"context"
"crypto/ecdsa"
"errors"
"fmt"
"io"
"os"
"slices"
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
buffPool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -19,7 +22,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"golang.org/x/sync/errgroup"
)
var errMissingHeaderInResponse = errors.New("missing header in response")
@@ -77,31 +82,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
slices.SortFunc(list, func(lhs, rhs cid.ID) int {
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
sort.Slice(list, func(i, j int) bool {
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
return strings.Compare(lhs, rhs) < 0
})
return list
}
func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
cliPrm := &client.PrmContainerListStream{
XHeaders: prm.XHeaders,
OwnerID: prm.OwnerID,
Session: prm.Session,
}
rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
if err != nil {
return fmt.Errorf("init container list: %w", err)
}
err = rdr.Iterate(processCnr)
if err != nil {
return fmt.Errorf("read container list: %w", err)
}
return
}
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client
@@ -466,6 +453,119 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
}, nil
}
func PutSingle(ctx context.Context, pk *ecdsa.PrivateKey, threadCount int, maxSize uint64, prm PutObjectPrm) (*PutObjectRes, error) {
res, err := prm.cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
if maxSize == 0 {
maxSize = res.Info().MaxObjectSize()
}
if threadCount <= 0 {
threadCount = 1
}
it := &internalTarget{client: prm.cli, prm: client.PrmObjectPutSingle{
XHeaders: prm.xHeaders,
BearerToken: prm.bearerToken,
Local: prm.local,
CopiesNumber: prm.copyNum,
Key: pk,
}}
it.eg.SetLimit(threadCount)
defer it.eg.Wait()
sz := prm.hdr.PayloadSize()
pool := buffPool.NewBufferPool(uint32(res.Info().MaxObjectSize()))
wrt := transformer.NewPayloadSizeLimiter(transformer.Params{
Key: pk,
NextTargetInit: func() transformer.ObjectWriter {
return it
},
NetworkState: epochSource(res.Info().CurrentEpoch()),
MaxSize: maxSize,
WithoutHomomorphicHash: res.Info().HomomorphicHashingDisabled(),
SizeHint: sz,
Pool: &pool,
})
if err := wrt.WriteHeader(ctx, prm.hdr); err != nil {
return nil, err
}
if prm.headerCallback != nil {
prm.headerCallback()
}
if data := prm.hdr.Payload(); len(data) > 0 {
if prm.rdr != nil {
prm.rdr = io.MultiReader(bytes.NewReader(data), prm.rdr)
} else {
prm.rdr = bytes.NewReader(data)
sz = uint64(len(data))
}
}
if prm.rdr != nil {
const defaultBufferSizePut = 4 << 20 // Maximum chunk size is 3 MiB in the SDK.
if sz == 0 || sz > defaultBufferSizePut {
sz = defaultBufferSizePut
}
buf := make([]byte, sz)
var n int
for {
n, err = prm.rdr.Read(buf)
if n > 0 {
if _, err := wrt.Write(ctx, buf[:n]); err != nil {
return nil, err
}
continue
}
if errors.Is(err, io.EOF) {
break
}
return nil, fmt.Errorf("read payload: %w", err)
}
}
if err := it.eg.Wait(); err != nil {
return nil, err
}
cliRes, err := wrt.Close(ctx)
if err != nil { // here err already carries both status and client errors
return nil, fmt.Errorf("client failure: %w", err)
}
return &PutObjectRes{id: cliRes.SelfID}, nil
}
type internalTarget struct {
eg errgroup.Group
client *client.Client
pool *buffPool.BufferPool
prm client.PrmObjectPutSingle
}
func (it *internalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
it.eg.Go(func() error {
defer it.pool.Put(&buffPool.Buffer{Data: obj.Payload()})
prm := it.prm
prm.Object = obj
_, err := it.client.ObjectPutSingle(ctx, prm)
return err
})
return nil
}
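
The parallelism promised in the commit message lives in internalTarget above: the payload splitter calls WriteObject once per produced part, WriteObject enqueues the ObjectPutSingle call on an errgroup and returns immediately, and eg.SetLimit(threadCount) caps how many parts are in flight. A minimal, self-contained sketch of that bounded-parallelism pattern, with a stub standing in for the SDK call (all names here are illustrative):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// uploadPart stands in for client.ObjectPutSingle in the real code.
func uploadPart(ctx context.Context, part int) error {
	fmt.Println("uploading part", part)
	return nil
}

func main() {
	ctx := context.Background()
	var eg errgroup.Group
	eg.SetLimit(4) // like threadCount: at most 4 uploads in flight

	for part := 0; part < 10; part++ {
		part := part // pin the loop variable (pre-Go 1.22 toolchains)
		eg.Go(func() error { return uploadPart(ctx, part) })
	}
	// Wait returns the first error from any upload goroutine,
	// mirroring the eg.Wait() calls in PutSingle above.
	if err := eg.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}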
// DeleteObjectPrm groups parameters of DeleteObject operation.
type DeleteObjectPrm struct {
commonObjectPrm

View file

@@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" {
err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else {
fmt.Print("\n")

View file

@@ -6,11 +6,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// flags of list command.
@@ -54,60 +51,44 @@ var listContainersCmd = &cobra.Command{
var prm internalclient.ListContainersPrm
prm.SetClient(cli)
prm.OwnerID = idUser
prm.Account = idUser
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
var containerIDs []cid.ID
err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
printContainer(cmd, prmGet, id)
return false
})
if err == nil {
return
}
if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
containerIDs = res.SortedIDList()
} else {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
containerIDs := res.SortedIDList()
for _, cnrID := range containerIDs {
printContainer(cmd, prmGet, cnrID)
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(cnrID.String())
continue
}
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
continue
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
continue
}
cmd.Println(cnrID.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
}
},
}
func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(id.String())
return
}
prmGet.ClientParams.ContainerID = &id
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
return
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
return
}
cmd.Println(id.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
}
func initContainerListContainersCmd() {
commonflags.Init(listContainersCmd)

View file

@@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
nodes map[string]netmap.NodeInfo
}
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{
cmd: cmd,
nodes: map[string]netmap.NodeInfo{},
}
}, nil
}
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@@ -246,7 +246,8 @@ var policyPlaygroundCmd = &cobra.Command{
Long: `A REPL for testing placement policies.
If a wallet and endpoint are provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
Run: func(cmd *cobra.Command, _ []string) {
repl := newPolicyPlaygroundREPL(cmd)
repl, err := newPolicyPlaygroundREPL(cmd)
commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},
}

View file

@@ -0,0 +1,56 @@
package control
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
const ignoreErrorsFlag = "no-errors"
var evacuateShardCmd = &cobra.Command{
Use: "evacuate",
Short: "Evacuate objects from shard",
Long: "Evacuate objects from shard to other shards",
Run: evacuateShard,
Deprecated: "use frostfs-cli control shards evacuation start",
}
func evacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.EvacuateShardResponse
var err error
err = cli.ExecRaw(func(client *client.Client) error {
resp, err = control.EvacuateShard(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Shard has successfully been evacuated.")
}
func initControlEvacuateShardCmd() {
initControlFlags(evacuateShardCmd)
flags := evacuateShardCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}

View file

@@ -17,11 +17,10 @@ import (
)
const (
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
repOneOnlyFlag = "rep-one-only"
ignoreErrorsFlag = "no-errors"
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
repOneOnlyFlag = "rep-one-only"
containerWorkerCountFlag = "container-worker-count"
objectWorkerCountFlag = "object-worker-count"

View file

@@ -13,6 +13,7 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
@@ -22,6 +23,7 @@ func initControlShardsCmd() {
initControlShardsListCmd()
initControlSetShardModeCmd()
initControlEvacuateShardCmd()
initControlEvacuationShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()

View file

@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
@@ -41,9 +42,7 @@ func initObjectHashCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
_ = objectHashCmd.MarkFlagRequired("range")
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
}
@@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
tz := typ == hashTz
fullHash := len(ranges) == 0
if fullHash {
var headPrm internalclient.HeadObjectPrm
headPrm.SetClient(cli)
Prepare(cmd, &headPrm)
headPrm.SetAddress(objAddr)
// get hash of full payload through HEAD (maybe the user can do it through a dedicated command?)
res, err := internalclient.HeadObject(cmd.Context(), headPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
var cs checksum.Checksum
var csSet bool
if tz {
cs, csSet = res.Header().PayloadHomomorphicHash()
} else {
cs, csSet = res.Header().PayloadChecksum()
}
if csSet {
cmd.Println(hex.EncodeToString(cs.Value()))
} else {
cmd.Println("Missing checksum in object header.")
}
return
}
var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm)
@@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
hashPrm.SetSalt(salt)
hashPrm.SetRanges(ranges)
if typ == hashTz {
if tz {
hashPrm.TZ()
}

View file

@@ -1,12 +1,15 @@
package object
import (
"bytes"
"cmp"
"context"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"slices"
"sync"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@@ -504,6 +507,7 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
}
func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
normilizeObjectNodesResult(objects, result)
if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
printObjectNodesAsJSON(cmd, objID, objects, result)
} else {
@@ -511,6 +515,34 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
}
}
func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
slices.SortFunc(objects, func(lhs, rhs phyObject) int {
if lhs.ecHeader == nil && rhs.ecHeader == nil {
return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
}
if lhs.ecHeader == nil {
return -1
}
if rhs.ecHeader == nil {
return 1
}
if lhs.ecHeader.parent == rhs.ecHeader.parent {
return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
}
return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
})
for _, obj := range objects {
op := result.placements[obj.objectID]
slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
result.placements[obj.objectID] = op
}
}
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))

View file

@@ -46,7 +46,7 @@ func initObjectPatchCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
@@ -99,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
}
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
if err != nil {
return nil, err
var rawAttrs []string
raw := cmd.Flag(newAttrsFlagName).Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes

View file

@@ -27,6 +27,8 @@ const (
notificationFlag = "notify"
copiesNumberFlag = "copies-number"
prepareLocallyFlag = "prepare-locally"
threadCountFlag = "thread-count"
maxSizeFlag = "max-size-mb"
)
var putExpiredOn uint64
@@ -50,7 +52,7 @@ func initObjectPutCmd() {
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@@ -61,6 +63,8 @@ func initObjectPutCmd() {
flags.Bool(binaryFlag, false, "Deserialize object structure from given file.")
flags.String(copiesNumberFlag, "", "Number of copies of the object to store within the RPC call")
flags.Int(threadCountFlag, 1, "Max objects to put in parallel")
flags.Uint64(maxSizeFlag, 0, "Max object size in mebibytes")
}
func putObject(cmd *cobra.Command, _ []string) {
@@ -107,6 +111,24 @@ func putObject(cmd *cobra.Command, _ []string) {
if prepareLocally, _ := cmd.Flags().GetBool(prepareLocallyFlag); prepareLocally {
prm.SetClient(internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC))
prm.PrepareLocally()
Prepare(cmd, &prm)
prm.SetHeader(obj)
prm.SetPayloadReader(payloadReader)
copyNum, err := cmd.Flags().GetString(copiesNumberFlag)
commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err)
prm.SetCopiesNumberByVectors(parseCopyNumber(cmd, copyNum))
threadCount, _ := cmd.Flags().GetInt(threadCountFlag)
maxSize, _ := cmd.Flags().GetUint64(maxSizeFlag)
maxSize <<= 20 // In mebibytes.
res, err := internalclient.PutSingle(cmd.Context(), pk, threadCount, maxSize, prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
cmd.Printf("[%s] Object successfully stored\n", filename)
cmd.Printf(" OID: %s\n CID: %s\n", res.ID(), cnr)
} else {
ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
}
@@ -214,9 +236,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
}
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
if err != nil {
return nil, err
var rawAttrs []string
raw := cmd.Flag("attributes").Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes

View file

@@ -38,7 +38,7 @@ func initObjectRangeCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
flags.String("range", "", "Range to take data from in the form offset:length")
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc)
}
@@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
}
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
vs, err := cmd.Flags().GetStringSlice("range")
if len(vs) == 0 || err != nil {
return nil, err
v := cmd.Flag("range").Value.String()
if len(v) == 0 {
return nil, nil
}
vs := strings.Split(v, ",")
rs := make([]objectSDK.Range, len(vs))
for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep)

View file

@@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
zap.String("error", err.Error()),
)
} else {
c.init(ctx)

View file

@@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
innerRing.Stop(ctx)
if err := metricsCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
zap.String("error", err.Error()),
)
}
if err := pprofCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
zap.String("error", err.Error()),
)
}

View file

@@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err))
zap.String("error", err.Error()))
return
}

View file

@@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path
parser := parentBucket.NextParser
buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
if err != nil {
return err
}
for item := range buffer {
if item.err != nil {
@@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
}
bucket := item.val
var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil {
return err
@@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel()
// Check the current bucket's nested buckets if exist
bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range bucketsBuffer {
if item.err != nil {
@@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
b := item.val
var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil {
return false, err
@@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
// Check the current bucket's nested records if exist
recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range recordsBuffer {
if item.err != nil {
@@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
r := item.val
var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil {
return false, err

View file

@@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T,
) <-chan Item[T] {
) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize)
go func() {
@@ -77,13 +77,13 @@ func load[T any](
}
}()
return buffer
return buffer, nil
}
func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Bucket] {
buffer := load(
) (<-chan Item[*Bucket], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value == nil
@@ -98,14 +98,17 @@ func LoadBuckets(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Record] {
buffer := load(
) (<-chan Item[*Record], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value != nil
@@ -121,8 +124,11 @@ func LoadRecords(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
@@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
buffer := load(
buffer, err := load(
ctx, db, path, 1,
nil,
func(_, value []byte) []byte { return value },
)
if err != nil {
return false, err
}
x, ok := <-buffer
if !ok {
return false, nil
}
if x.err != nil {
return false, x.err
return false, err
}
if x.val != nil {
return false, nil
return false, err
}
return true, nil
}
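
All the tui call sites above change because load[T] is the single producer behind LoadBuckets and LoadRecords: it starts a goroutine that streams Item[T] values, each carrying either a value or an error, through a bounded channel and stops on context cancellation. A small, self-contained sketch of that producer/consumer shape using only the standard library (the real helper additionally walks bbolt buckets, omitted here):

package main

import (
	"context"
	"fmt"
)

// Item mirrors the value-or-error pairs sent over the channel above.
type Item[T any] struct {
	val T
	err error
}

// load streams src through a bounded channel, stopping early if the
// consumer cancels the context.
func load[T any](ctx context.Context, src []T, bufferSize int) <-chan Item[T] {
	buffer := make(chan Item[T], bufferSize)
	go func() {
		defer close(buffer)
		for _, v := range src {
			select {
			case <-ctx.Done(): // consumer gave up; stop producing
				return
			case buffer <- Item[T]{val: v}:
			}
		}
	}()
	return buffer
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for item := range load(ctx, []string{"a", "b", "c"}, 1) {
		if item.err != nil {
			fmt.Println("error:", item.err)
			break
		}
		fmt.Println("got:", item.val)
	}
}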

View file

@@ -62,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx)
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
if err != nil {
return err
}
v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() {
@@ -70,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer {
if item.err != nil {
v.ui.stopOnError(item.err)
v.ui.stopOnError(err)
break
}
record := item.val
var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil {
v.ui.stopOnError(err)

View file

@@ -19,7 +19,6 @@ func initAPEManagerService(c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
c.cfgMorph.client,
apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)

View file

@@ -591,6 +591,8 @@ type cfgMorph struct {
client *client.Client
notaryEnabled bool
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
@@ -606,10 +608,9 @@ type cfgAccounting struct {
type cfgContainer struct {
scriptHash neogoutil.Uint160
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
containerBatchSize uint32
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
}
type cfgFrostfsID struct {
@@ -698,7 +699,8 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
@@ -852,8 +854,8 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
}
func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
return cfgGRPC{
maxChunkSize: maxChunkSize,
@@ -1058,7 +1060,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh
}
func (c *cfg) loggerPrm() *logger.Prm {
func (c *cfg) loggerPrm() (*logger.Prm, error) {
// check if it has been inited before
if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm)
@@ -1077,7 +1079,7 @@ func (c *cfg) loggerPrm() *logger.Prm {
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
return c.dynamicConfiguration.logger, nil
}
func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1119,7 +1121,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.Error(err),
zap.String("error", err.Error()),
)
} else {
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
@@ -1146,7 +1148,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
}
@@ -1209,7 +1211,7 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
zap.Error(err))
zap.String("error", err.Error()))
return
}
@@ -1219,9 +1221,9 @@
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo
ni.SetStatus(state)
stateSetter(&ni)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
@@ -1231,7 +1233,9 @@
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, netmap.Online)
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
}
// bootstrap calls bootstrapWithState with:
@@ -1242,7 +1246,9 @@ func (c *cfg) bootstrap(ctx context.Context) error {
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, netmap.Maintenance)
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
}
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
@@ -1333,7 +1339,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// Logger
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm)
@@ -1456,7 +1466,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
return container.NewInfoProvider(func() (container.Source, error) {
c.initMorphComponents(ctx)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
if err != nil {
return nil, err
}

View file

@@ -1,7 +1,6 @@
package config
import (
"slices"
"strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used
// to provide default values.
func (x *Config) SetDefault(from *Config) {
x.defaultPath = slices.Clone(from.path)
x.defaultPath = make([]string, len(from.path))
copy(x.defaultPath, from.path)
}

View file

@@ -1,27 +0,0 @@
package containerconfig
import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
const (
subsection = "container"
listStreamSubsection = "list_stream"
// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
ContainerBatchSizeDefault = 1000
)
// ContainerBatchSize returns the value of "batch_size" config parameter
// from "list_stream" subsection of "container" section.
//
// Returns ContainerBatchSizeDefault if the value is missing or if
// the value is not positive integer.
func ContainerBatchSize(c *config.Config) uint32 {
if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
return ContainerBatchSizeDefault
}
size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
if size == 0 {
return ContainerBatchSizeDefault
}
return size
}

View file

@@ -1,27 +0,0 @@
package containerconfig_test
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestContainerSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
})
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}

View file

@@ -41,10 +41,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
c.Sub(si),
)
if sc.Mode() == mode.Disabled {
continue
}
// Path for the blobstor can't be present in the default section, because different shards
// must have different paths, so if it is missing, the shard is not here.
// At the same time checking for "blobstor" section doesn't work proper
@@ -54,6 +50,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
}
(*config.Config)(sc).SetDefault(def)
if sc.Mode() == mode.Disabled {
continue
}
if err := f(sc); err != nil {
return err
}

View file

@@ -18,22 +18,6 @@ import (
"github.com/stretchr/testify/require"
)
func TestIterateShards(t *testing.T) {
fileConfigTest := func(c *config.Config) {
var res []string
require.NoError(t,
engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
res = append(res, sc.Metabase().Path())
return nil
}))
require.Equal(t, []string{"abc", "xyz"}, res)
}
const cfgDir = "./testdata/shards"
configtest.ForEachFileType(cfgDir, fileConfigTest)
configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
}
func TestEngineSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()

View file

@@ -1,3 +0,0 @@
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
FROSTFS_STORAGE_SHARD_1_MODE=disabled
FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz

View file

@@ -1,13 +0,0 @@
{
"storage.shard": {
"0": {
"metabase.path": "abc"
},
"1": {
"mode": "disabled"
},
"2": {
"metabase.path": "xyz"
}
}
}

View file

@@ -1,7 +0,0 @@
storage.shard:
0:
metabase.path: abc
1:
mode: disabled
2:
metabase.path: xyz

View file

@@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
//
// Returns PermDefault if the value is not a positive number.
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
p := config.UintSafe(l.cfg, "perm")
p := config.UintSafe((*config.Config)(l.cfg), "perm")
if p == 0 {
p = PermDefault
}
@@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
//
// Returns false if the value is not a boolean.
func (l PersistentPolicyRulesConfig) NoSync() bool {
return config.BoolSafe(l.cfg, "no_sync")
return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
}
// CompatibilityMode returns true if need to run node in compatibility with previous versions mode.

View file

@@ -5,7 +5,6 @@ import (
"context"
"net"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
@@ -29,7 +28,7 @@ import (
func initContainerService(_ context.Context, c *cfg) {
// container wrapper that tries to invoke notary
// requests if chain is configured so
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
fatalOnErr(err)
c.shared.cnrClient = wrap
@@ -43,12 +42,11 @@ func initContainerService(_ context.Context, c *cfg) {
fatalOnErr(err)
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
if cacheSize > 0 {
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
}
c.shared.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
@@ -58,9 +56,7 @@ func initContainerService(_ context.Context, c *cfg) {
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
containerService.NewSplitterService(
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
),
)
service = containerService.NewAuditService(service, c.log, c.audit)
@@ -222,7 +218,6 @@ type morphContainerReader struct {
lister interface {
ContainersOf(*user.ID) ([]cid.ID, error)
IterateContainersOf(*user.ID, func(cid.ID) error) error
}
}
@@ -238,10 +233,6 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(id)
}
func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error {
return x.lister.IterateContainersOf(id, processCID)
}
type morphContainerWriter struct {
neoClient *cntClient.Client
}

View file

@@ -134,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
err := stopper(ctx)
if err != nil {
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.Error(err),
zap.String("error", err.Error()),
)
}

View file

@@ -35,16 +35,20 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
lookupScriptHashesInNNS(c) // smart contract auto negotiation
err := c.cfgMorph.client.EnableNotarySupport(
client.WithProxyContract(
c.cfgMorph.proxyScriptHash,
),
if c.cfgMorph.notaryEnabled {
err := c.cfgMorph.client.EnableNotarySupport(
client.WithProxyContract(
c.cfgMorph.proxyScriptHash,
),
)
fatalOnErr(err)
}
c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)
fatalOnErr(err)
c.log.Info(ctx, logs.FrostFSNodeNotarySupport)
wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
fatalOnErr(err)
var netmapSource netmap.Source
@ -96,7 +100,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.Error(err),
zap.String("error", err.Error()),
)
fatalOnErr(err)
@ -112,9 +116,15 @@ func initMorphClient(ctx context.Context, c *cfg) {
}
c.cfgMorph.client = cli
c.cfgMorph.notaryEnabled = cli.ProbeNotary()
}
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
// skip notary deposit in non-notary environments
if !c.cfgMorph.notaryEnabled {
return
}
tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err)
@ -151,7 +161,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
}
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil {
return err
}
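
Editorial note: the `WaitTxHalt` hunk above changes how the transaction hash and its "valid until block" (VUB) travel: as separate arguments on one side of the diff, bundled into a single `client.InvokeRes` value on the other. A hedged sketch of that bundling, with `InvokeRes` simplified from the real client package:

```go
package main

import "fmt"

// InvokeRes is a simplified stand-in for the morph client's result type.
type InvokeRes struct {
	Hash [32]byte // transaction hash
	VUB  uint32   // block height after which the tx is no longer valid
}

// waitTxHalt is a stand-in for client.WaitTxHalt: it takes the bundled
// result instead of separate (vub, hash) arguments.
func waitTxHalt(res InvokeRes) error {
	fmt.Printf("waiting for tx %x until block %d\n", res.Hash[:4], res.VUB)
	return nil
}

func main() {
	_ = waitTxHalt(InvokeRes{VUB: 12345})
}
```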
@ -168,7 +178,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
@ -223,17 +233,27 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
subs map[event.Type][]event.Handler,
) {
for typ, handlers := range subs {
pi := event.NotificationParserInfo{}
pi.SetType(typ)
pi.SetScriptHash(scHash)
p, ok := parsers[typ]
if !ok {
panic(fmt.Sprintf("missing parser for event %s", typ))
}
lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
Contract: scHash,
Type: typ,
Parser: p,
Handlers: handlers,
})
pi.SetParser(p)
lis.SetNotificationParser(pi)
for _, h := range handlers {
hi := event.NotificationHandlerInfo{}
hi.SetType(typ)
hi.SetScriptHash(scHash)
hi.SetHandler(h)
lis.RegisterNotificationHandler(hi)
}
}
}
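
Editorial note: a hypothetical usage sketch for `registerNotificationHandlers` as shown above. The caller builds per-type parser and handler maps, and registration panics if a handler's event type has no parser; types here are simplified stand-ins for the event package's real ones:

```go
package main

import "fmt"

type (
	eventType string
	handler   func(any)
	parser    func([]byte) (any, error)
)

func register(parsers map[eventType]parser, subs map[eventType][]handler) {
	for typ, handlers := range subs {
		p, ok := parsers[typ]
		if !ok {
			panic(fmt.Sprintf("missing parser for event %s", typ))
		}
		ev, _ := p([]byte("payload")) // parse once, fan out to handlers
		for _, h := range handlers {
			h(ev)
		}
	}
}

func main() {
	register(
		map[eventType]parser{"NewEpoch": func(b []byte) (any, error) { return string(b), nil }},
		map[eventType][]handler{"NewEpoch": {func(e any) { fmt.Println("handled:", e) }}},
	)
}
```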
@ -262,6 +282,10 @@ func lookupScriptHashesInNNS(c *cfg) {
)
for _, t := range targets {
if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
continue // ignore proxy contract if notary disabled
}
if emptyHash.Equals(*t.h) {
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
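
Editorial note: a minimal sketch of the auto-negotiation step in `lookupScriptHashesInNNS`: script hashes left at the zero value in config are resolved by contract name through NNS. The resolver here is a hypothetical stand-in for `client.NNSContractAddress`:

```go
package main

import "fmt"

type uint160 [20]byte // simplified stand-in for util.Uint160

// resolveNNS is a hypothetical stand-in for c.cfgMorph.client.NNSContractAddress.
func resolveNNS(name string) (uint160, error) {
	return uint160{0x01}, nil
}

func main() {
	var emptyHash uint160
	contracts := map[string]uint160{"netmap.frostfs": {}}

	for name, h := range contracts {
		if h == emptyHash { // hash not configured explicitly
			resolved, err := resolveNNS(name)
			if err != nil {
				panic(fmt.Sprintf("can't resolve %s in NNS: %v", name, err))
			}
			contracts[name] = resolved
		}
	}
	fmt.Printf("%x\n", contracts["netmap.frostfs"])
}
```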

View file

@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
}
}
s.setControlNetmapStatus(ctrlNetSt)
s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
}
// sets the current node state to the given value. Subsequent cfg.bootstrap
@ -193,14 +193,16 @@ func addNewEpochNotificationHandlers(c *cfg) {
}
})
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.Error(err),
)
}
})
if c.cfgMorph.notaryEnabled {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
})
}
}
// bootstrapNode adds current node to the Network map.
@ -423,7 +425,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.
if err != nil {
return err
}
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res)
}
type netInfo struct {

View file

@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@ -58,7 +59,7 @@ func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.Error(err),
zap.String("error", err.Error()),
)
}
@ -136,6 +137,24 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
return result, nil
}
type innerRingFetcherWithoutNotary struct {
nm *nmClient.Client
}
func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
keys, err := f.nm.GetInnerRingList()
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
}
result := make([][]byte, 0, len(keys))
for i := range keys {
result = append(result, keys[i].Bytes())
}
return result, nil
}
func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
@ -215,7 +234,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr)
prm.WithForceRemoval()
return ls.Inhume(ctx, prm)
_, err := ls.Inhume(ctx, prm)
return err
}
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
@ -265,9 +285,10 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr)
if err := ls.Inhume(ctx, inhumePrm); err != nil {
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.Error(err),
zap.String("error", err.Error()),
)
}
}),
@ -284,8 +305,13 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
}
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
if c.cfgMorph.client.ProbeNotary() {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
}
}
return &innerRingFetcherWithoutNotary{
nm: c.cfgNetmap.wrapper,
}
}
@ -474,7 +500,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...)
return e.engine.Inhume(ctx, prm)
_, err := e.engine.Inhume(ctx, prm)
return err
}
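
Editorial note: the `Inhume` call sites in this file change shape on the two sides of the diff: one returns only an error, the other returns a result value that callers may discard. A sketch of the two-value call shape, with `InhumeRes` as a hypothetical stand-in for the engine's result type:

```go
package main

import "fmt"

type InhumeRes struct{ Inhumed uint64 } // hypothetical result type

// inhume mimics the two-value signature: a result plus an error.
func inhume() (InhumeRes, error) { return InhumeRes{Inhumed: 1}, nil }

func main() {
	// Callers that only propagate the error discard the result:
	_, err := inhume()
	fmt.Println(err)
}
```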
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {

View file

@ -113,7 +113,7 @@ func initTreeService(c *cfg) {
// Ignore pilorama.ErrTreeNotFound; other errors, including shard.ErrReadOnly, should be logged.
c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.Error(err))
zap.String("error", err.Error()))
}
})

View file

@ -83,9 +83,6 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10
# Container service section
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200

View file

@ -124,11 +124,6 @@
"pool_size": 10,
"put_timeout": "15s"
},
"container": {
"list_stream": {
"batch_size": "500"
}
},
"object": {
"delete": {
"tombstone_lifetime": 10

View file

@ -79,8 +79,7 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
morph:
dial_timeout: 30s # timeout for side chain NEO RPC client connection
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls).
# Negative value disables caching. A zero value sets the default value.
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Default value: block time. It is recommended to have this value less than or equal to the block time.
# Cached entities: containers, container lists, eACL tables.
container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache.
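
Editorial note: the comments above pin down the `cache_ttl` semantics: a negative value disables caching, zero falls back to the default (the sidechain block time). A minimal sketch of that interpretation, assuming a block time supplied by the caller:

```go
package main

import (
	"fmt"
	"time"
)

// effectiveTTL interprets a cache_ttl setting: negative disables caching,
// zero falls back to the block time, positive values are used as-is.
func effectiveTTL(configured, blockTime time.Duration) (time.Duration, bool) {
	switch {
	case configured < 0:
		return 0, false
	case configured == 0:
		return blockTime, true
	default:
		return configured, true
	}
}

func main() {
	ttl, enabled := effectiveTTL(0, 15*time.Second)
	fmt.Println(ttl, enabled) // 15s true
}
```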
@ -109,10 +108,6 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum number of concurrent replications
container:
list_stream:
batch_size: 500 # container_batch_size is the maximum number of containers to send via stream at once
object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs

View file

@ -42,6 +42,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
@ -97,6 +98,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
@ -152,6 +154,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
@ -207,6 +210,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",

View file

@ -95,15 +95,19 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release
### Prepare and push images to a Docker registry (automated)
### Prepare and push images to Docker Hub (if not automated)
Create Docker images for all applications and push them to the container registry
(executed automatically in Forgejo Actions upon pushing a release tag):
Create Docker images for all applications and push them to Docker Hub
(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images
$ make push-images
$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
```
### Make a proper release (if not automated)

34
go.mod
View file

@ -4,11 +4,11 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
@ -27,7 +27,7 @@ require (
github.com/klauspost/compress v1.17.4
github.com/mailru/easyjson v0.7.7
github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.14.0
github.com/multiformats/go-multiaddr v0.12.1
github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
@ -40,15 +40,15 @@ require (
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.10.0
golang.org/x/sys v0.28.0
golang.org/x/term v0.27.0
google.golang.org/grpc v1.69.2
google.golang.org/protobuf v1.36.1
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/term v0.21.0
google.golang.org/grpc v1.66.2
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
)
@ -119,15 +119,15 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect

72
go.sum
View file

@ -1,15 +1,15 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 h1:o3iqVmbvFsfe8kpB2Hvuix6Q/tAhbiPLP91xK4lmoBQ=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 h1:sepm9FeuoInmygH1K/+3L+Yp5bJhGiVi/oGCH6Emp2c=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
@ -106,8 +106,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -190,8 +188,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=
github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4=
github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk=
github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
@ -292,22 +290,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -322,8 +318,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -343,16 +339,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -379,16 +375,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -396,8 +392,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@ -410,12 +406,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -424,8 +420,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View file

@ -146,6 +146,7 @@ const (
ClientCantGetBlockchainHeight = "can't get blockchain height"
ClientCantGetBlockchainHeight243 = "can't get blockchain height"
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
EventCouldNotStartListenToEvents = "could not start listen to events"
EventStopEventListenerByError = "stop event listener by error"
EventStopEventListenerByContext = "stop event listener by context"
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
@ -163,9 +164,17 @@ const (
EventNotaryParserNotSet = "notary parser not set"
EventCouldNotParseNotaryEvent = "could not parse notary event"
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
EventIgnoreNilEventParser = "ignore nil event parser"
EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
EventRegisteredNewEventParser = "registered new event parser"
EventIgnoreNilEventHandler = "ignore nil event handler"
EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
EventRegisteredNewEventHandler = "registered new event handler"
EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
EventIgnoreNilBlockHandler = "ignore nil block handler"
StorageOperation = "local object storage operation"
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
BlobovniczaOpeningBoltDB = "opening BoltDB"
@ -383,6 +392,7 @@ const (
FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
FrostFSNodeConfigurationReading = "configuration reading"
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeTracingConfigationUpdated = "tracing configuration updated"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"

View file

@ -12,9 +12,8 @@ type ApplicationInfo struct {
func NewApplicationInfo(version string) *ApplicationInfo {
appInfo := &ApplicationInfo{
versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "app_info",
Help: "General information about the application.",
Name: "app_info",
Help: "General information about the application.",
}, []string{"version"}),
}
appInfo.versionValue.With(prometheus.Labels{"version": version})
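
Editorial note: the `app_info` hunk above adds and removes a `Namespace` field in the gauge options. In the Prometheus client, the namespace is joined to the name with an underscore to form the fully qualified metric name; a small sketch (the "frostfs" namespace value is an assumption):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// BuildFQName joins namespace, subsystem, and name with underscores,
	// skipping empty parts: "frostfs" + "app_info" => "frostfs_app_info".
	fmt.Println(prometheus.BuildFQName("frostfs", "", "app_info"))
	// Without a namespace, the metric is exported as just "app_info".
	fmt.Println(prometheus.BuildFQName("", "", "app_info"))
}
```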

View file

@ -31,7 +31,9 @@ type RPCActorProvider interface {
type ProxyVerificationContractStorage struct {
rpcActorProvider RPCActorProvider
cosigners []actor.SignerAccount
acc *wallet.Account
proxyScriptHash util.Uint160
policyScriptHash util.Uint160
}
@ -39,27 +41,12 @@ type ProxyVerificationContractStorage struct {
var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
acc := wallet.NewAccountFromPrivateKey(key)
return &ProxyVerificationContractStorage{
rpcActorProvider: rpcActorProvider,
cosigners: []actor.SignerAccount{
{
Signer: transaction.Signer{
Account: proxyScriptHash,
Scopes: transaction.CustomContracts,
AllowedContracts: []util.Uint160{policyScriptHash},
},
Account: notary.FakeContractAccount(proxyScriptHash),
},
{
Signer: transaction.Signer{
Account: acc.Contract.ScriptHash(),
Scopes: transaction.CalledByEntry,
},
Account: acc,
},
},
acc: wallet.NewAccountFromPrivateKey(key),
proxyScriptHash: proxyScriptHash,
policyScriptHash: policyScriptHash,
}
@ -77,7 +64,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
act, err := actor.New(rpcActor, contractStorage.cosigners)
act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash))
if err != nil {
return nil, err
}
@ -111,16 +98,31 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na
// ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as cosigners.
func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
// contractStorageActor is reconstructed on each method invocation because the RPCActor's (that is, basically, the WSClient's) connection may get invalidated, but
// ProxyVerificationContractStorage does not manage reconnections.
contractStorageActor, err := contractStorage.newContractStorageActor()
if err != nil {
return nil, err
}
return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
}
type invokerAdapter struct {
*invoker.Invoker
rpcInvoker invoker.RPCInvoke
}
func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
return n.rpcInvoker
func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount {
return []actor.SignerAccount{
{
Signer: transaction.Signer{
Account: proxyScriptHash,
Scopes: transaction.CustomContracts,
AllowedContracts: []util.Uint160{policyScriptHash},
},
Account: notary.FakeContractAccount(proxyScriptHash),
},
{
Signer: transaction.Signer{
Account: acc.Contract.ScriptHash(),
Scopes: transaction.CalledByEntry,
},
Account: acc,
},
}
}

View file

@ -67,7 +67,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
if err != nil {
// do not return an error, try best-case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
zap.Error(err))
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
@ -84,7 +84,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
// is not possible for previous epoch, so
// do not return an error, try best-case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
zap.Error(err))
zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,

View file

@ -8,6 +8,7 @@ type (
// ContractProcessor interface defines functions for binding event producers
// such as event.Listener and Timers to a contract processor.
ContractProcessor interface {
ListenerNotificationParsers() []event.NotificationParserInfo
ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo
@ -15,6 +16,11 @@ type (
)
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
// register notification parsers
for _, parser := range p.ListenerNotificationParsers() {
l.SetNotificationParser(parser)
}
// register notification handlers
for _, handler := range p.ListenerNotificationHandlers() {
l.RegisterNotificationHandler(handler)

View file

@ -38,7 +38,10 @@ import (
func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
alphaSync event.Handler,
) error {
locodeValidator := s.newLocodeValidator(cfg)
locodeValidator, err := s.newLocodeValidator(cfg)
if err != nil {
return err
}
netSettings := (*networkSettings)(s.netmapClient)
@ -48,7 +51,6 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
poolSize := cfg.GetInt("workers.netmap")
s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
var err error
s.netmapProcessor, err = netmap.New(&netmap.Params{
Log: s.log,
Metrics: s.irMetrics,
@ -98,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock
@ -378,6 +380,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
// form morph container client's options
morphCnrOpts := make([]container.Option, 0, 3)
morphCnrOpts = append(morphCnrOpts,
container.TryNotary(),
container.AsAlphabet(),
)
@ -387,12 +390,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
}
s.containerClient = result.CnrClient
s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet())
s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
if err != nil {
return nil, err
}
s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet())
s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
if err != nil {
return nil, err
}
@ -454,7 +457,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
morphChain := &chainParams{

View file

@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
if err != nil {
// we don't stop inner ring execution on this error
s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
zap.Error(err))
zap.String("error", err.Error()))
}
s.tickInitialExpoch(ctx)
@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) {
for _, c := range s.closers {
if err := c(); err != nil {
s.log.Warn(ctx, logs.InnerringCloserError,
zap.Error(err),
zap.String("error", err.Error()),
)
}
}

View file

@ -9,7 +9,7 @@ import (
"github.com/spf13/viper"
)
func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
locodeDB := locodebolt.New(locodebolt.Prm{
Path: cfg.GetString("locode.db.path"),
},
@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
return irlocode.New(irlocode.Prm{
DB: (*locodeBoltDBWrapper)(locodeDB),
})
}), nil
}
type locodeBoltEntryWrapper struct {

View file

@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
// there is no signature collecting, so we don't need extra fee
_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return false
}
@ -47,7 +47,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
networkMap, err := ap.netmapClient.NetMap()
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
zap.Error(err))
zap.String("error", err.Error()))
continue
}
@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
zap.Error(err),
zap.String("error", err.Error()),
)
}
}
@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
zap.Error(err),
zap.String("error", err.Error()),
)
}
}

View file

@ -114,6 +114,11 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
ap.pwLock.Unlock()
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
return nil
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil

View file

@ -88,16 +88,32 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
var parsers []event.NotificationParserInfo
// new lock event
lock := event.NotificationParserInfo{}
lock.SetType(lockNotification)
lock.SetScriptHash(bp.balanceSC)
lock.SetParser(balanceEvent.ParseLock)
parsers = append(parsers, lock)
return parsers
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: bp.balanceSC,
Type: lockNotification,
Parser: balanceEvent.ParseLock,
Handlers: []event.Handler{bp.handleLock},
},
}
var handlers []event.NotificationHandlerInfo
// lock handler
lock := event.NotificationHandlerInfo{}
lock.SetType(lockNotification)
lock.SetScriptHash(bp.balanceSC)
lock.SetHandler(bp.handleLock)
handlers = append(handlers, lock)
return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -50,7 +50,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
err := cp.checkPutContainer(pctx)
if err != nil {
cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
zap.Error(err),
zap.String("error", err.Error()),
)
return false
@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
zap.Error(err),
zap.String("error", err.Error()),
)
return false
}
@ -113,7 +113,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
err := cp.checkDeleteContainer(e)
if err != nil {
cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
zap.Error(err),
zap.String("error", err.Error()),
)
return false
@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
zap.Error(err),
zap.String("error", err.Error()),
)
return false

View file

@ -118,6 +118,11 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
return nil
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil

View file

@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
zap.Error(err))
zap.String("error", err.Error()))
return false
}

View file

@ -142,34 +142,70 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
var (
parsers = make([]event.NotificationParserInfo, 0, 6)
p event.NotificationParserInfo
)
p.SetScriptHash(np.frostfsContract)
// deposit event
p.SetType(event.TypeFromString(depositNotification))
p.SetParser(frostfsEvent.ParseDeposit)
parsers = append(parsers, p)
// withdraw event
p.SetType(event.TypeFromString(withdrawNotification))
p.SetParser(frostfsEvent.ParseWithdraw)
parsers = append(parsers, p)
// cheque event
p.SetType(event.TypeFromString(chequeNotification))
p.SetParser(frostfsEvent.ParseCheque)
parsers = append(parsers, p)
// config event
p.SetType(event.TypeFromString(configNotification))
p.SetParser(frostfsEvent.ParseConfig)
parsers = append(parsers, p)
return parsers
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: np.frostfsContract,
Type: event.TypeFromString(depositNotification),
Parser: frostfsEvent.ParseDeposit,
Handlers: []event.Handler{np.handleDeposit},
},
{
Contract: np.frostfsContract,
Type: event.TypeFromString(withdrawNotification),
Parser: frostfsEvent.ParseWithdraw,
Handlers: []event.Handler{np.handleWithdraw},
},
{
Contract: np.frostfsContract,
Type: event.TypeFromString(chequeNotification),
Parser: frostfsEvent.ParseCheque,
Handlers: []event.Handler{np.handleCheque},
},
{
Contract: np.frostfsContract,
Type: event.TypeFromString(configNotification),
Parser: frostfsEvent.ParseConfig,
Handlers: []event.Handler{np.handleConfig},
},
}
var (
handlers = make([]event.NotificationHandlerInfo, 0, 6)
h event.NotificationHandlerInfo
)
h.SetScriptHash(np.frostfsContract)
// deposit handler
h.SetType(event.TypeFromString(depositNotification))
h.SetHandler(np.handleDeposit)
handlers = append(handlers, h)
// withdraw handler
h.SetType(event.TypeFromString(withdrawNotification))
h.SetHandler(np.handleWithdraw)
handlers = append(handlers, h)
// cheque handler
h.SetType(event.TypeFromString(chequeNotification))
h.SetHandler(np.handleCheque)
handlers = append(handlers, h)
// config handler
h.SetType(event.TypeFromString(configNotification))
h.SetHandler(np.handleConfig)
handlers = append(handlers, h)
return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -28,21 +28,21 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
zap.Error(err))
zap.String("error", err.Error()))
return false
}
@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
zap.Error(err))
zap.String("error", err.Error()))
}
// 2. Update NeoFSAlphabet role in the sidechain.
@ -98,14 +98,14 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
zap.Error(err))
zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
zap.Error(err))
zap.String("error", err.Error()))
return
}
@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
zap.Error(err))
zap.String("error", err.Error()))
}
}
@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe
err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
zap.Error(err))
zap.String("error", err.Error()))
}
}
@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
zap.Error(err))
zap.String("error", err.Error()))
}
}

View file

@ -155,16 +155,22 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
var pi event.NotificationParserInfo
pi.SetScriptHash(gp.designate)
pi.SetType(event.TypeFromString(native.DesignationEventName))
pi.SetParser(rolemanagement.ParseDesignate)
return []event.NotificationParserInfo{pi}
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: gp.designate,
Type: event.TypeFromString(native.DesignationEventName),
Parser: rolemanagement.ParseDesignate,
Handlers: []event.Handler{gp.HandleAlphabetSync},
},
}
var hi event.NotificationHandlerInfo
hi.SetScriptHash(gp.designate)
hi.SetType(event.TypeFromString(native.DesignationEventName))
hi.SetHandler(gp.HandleAlphabetSync)
return []event.NotificationHandlerInfo{hi}
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
})
if err != nil {
np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
zap.Error(err))
zap.String("error", err.Error()))
return false
}

View file

@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
zap.Error(err))
zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)
}
@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
zap.Error(err))
zap.String("error", err.Error()))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
zap.Error(err))
zap.String("error", err.Error()))
}
// get new netmap snapshot
networkMap, err := np.netmapClient.NetMap()
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
zap.Error(err))
zap.String("error", err.Error()))
return false
}

View file

@ -42,7 +42,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
zap.Error(err),
zap.String("error", err.Error()),
)
return false

View file

@ -161,16 +161,36 @@ func New(p *Params) (*Processor, error) {
}, nil
}
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
parsers := make([]event.NotificationParserInfo, 0, 3)
var p event.NotificationParserInfo
p.SetScriptHash(np.netmapClient.ContractAddress())
// new epoch event
p.SetType(newEpochNotification)
p.SetParser(netmapEvent.ParseNewEpoch)
parsers = append(parsers, p)
return parsers
}
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{
{
Contract: np.netmapClient.ContractAddress(),
Type: newEpochNotification,
Parser: netmapEvent.ParseNewEpoch,
Handlers: []event.Handler{np.handleNewEpoch},
},
}
handlers := make([]event.NotificationHandlerInfo, 0, 3)
var i event.NotificationHandlerInfo
i.SetScriptHash(np.netmapClient.ContractAddress())
// new epoch handler
i.SetType(newEpochNotification)
i.SetHandler(np.handleNewEpoch)
handlers = append(handlers, i)
return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -62,7 +62,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool {
func (s *Server) InnerRingIndex(ctx context.Context) int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}
@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
func (s *Server) InnerRingSize(ctx context.Context) int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}
@ -86,7 +86,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
func (s *Server) AlphabetIndex(ctx context.Context) int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}
@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.Error(err))
zap.String("error", err.Error()))
}
})

View file

@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
})
})
if err != nil {
return fmt.Errorf("determine DB size: %w", err)
return fmt.Errorf("can't determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
return saveItemsCount(tx, items)
}); err != nil {
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
return fmt.Errorf("save blobovnicza's size and items count: %w", err)
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
}
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}
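Most message rewrites in this series, like the pair above, change only the prefix wording; both variants wrap with %w, so matching via errors.Is/errors.As is unaffected. A quick runnable check:

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	base := fs.ErrNotExist
	// Both forms preserve the error chain via %w; they differ
	// only in how the context prefix reads.
	terse := fmt.Errorf("determine DB size: %w", base)
	verbose := fmt.Errorf("can't determine DB size: %w", base)
	fmt.Println(errors.Is(terse, fs.ErrNotExist))   // true
	fmt.Println(errors.Is(verbose, fs.ErrNotExist)) // true
}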

View file

@ -6,6 +6,7 @@ import (
"syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -93,6 +94,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
b.itemDeleted(recordSize)
}
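The trace_id field added above comes from the node-internal tracingPkg.GetTraceID(ctx) helper. A rough equivalent using the OpenTelemetry API directly; this is an assumption about what the helper does, since its implementation is not shown in this diff:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// getTraceID sketches what a helper like tracingPkg.GetTraceID presumably
// does: read the span from the context and render its trace ID, or return
// "" when the context carries no span.
func getTraceID(ctx context.Context) string {
	sc := trace.SpanContextFromContext(ctx)
	if !sc.HasTraceID() {
		return ""
	}
	return sc.TraceID().String()
}

func main() {
	// No span injected here, so the log field would simply be empty.
	fmt.Printf("trace_id=%q\n", getTraceID(context.Background()))
}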

View file

@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
if prm.ignoreErrors {
return nil
}
return fmt.Errorf("decode address key: %w", err)
return fmt.Errorf("could not decode address key: %w", err)
}
}

View file

@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -18,10 +19,7 @@ import (
"go.uber.org/zap"
)
var (
errObjectIsDeleteProtected = errors.New("object is delete protected")
deleteRes = common.DeleteRes{}
)
var errObjectIsDeleteProtected = errors.New("object is delete protected")
// Delete deletes object from blobovnicza tree.
//
@ -45,17 +43,17 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
defer span.End()
if b.readOnly {
return deleteRes, common.ErrReadOnly
return common.DeleteRes{}, common.ErrReadOnly
}
if b.rebuildGuard.TryRLock() {
defer b.rebuildGuard.RUnlock()
} else {
return deleteRes, errRebuildInProgress
return common.DeleteRes{}, errRebuildInProgress
}
if b.deleteProtectedObjects.Contains(prm.Address) {
return deleteRes, errObjectIsDeleteProtected
return common.DeleteRes{}, errObjectIsDeleteProtected
}
var bPrm blobovnicza.DeletePrm
@ -84,7 +82,8 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
zap.Error(err),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
}
@ -99,7 +98,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if err == nil && !objectFound {
// not found in any blobovnicza
return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound))
return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
success = err == nil
@ -113,7 +112,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
shBlz := b.getBlobovnicza(ctx, blzPath)
blz, err := shBlz.Open(ctx)
if err != nil {
return deleteRes, err
return common.DeleteRes{}, err
}
defer shBlz.Close(ctx)
@ -123,5 +122,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
// removes object from blobovnicza and returns common.DeleteRes.
func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(ctx, prm)
return deleteRes, err
return common.DeleteRes{}, err
}
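One side of this hunk returns a package-level deleteRes zero value, the other fresh common.DeleteRes{} literals. For an empty struct the two are equivalent zero values; the shared variable is purely a readability choice. A stand-in sketch:

package main

import "fmt"

type DeleteRes struct{} // stand-in for common.DeleteRes

// Package-level zero value, as on one side of the hunk.
var deleteRes = DeleteRes{}

func viaVar() (DeleteRes, error)     { return deleteRes, nil }
func viaLiteral() (DeleteRes, error) { return DeleteRes{}, nil }

func main() {
	a, _ := viaVar()
	b, _ := viaLiteral()
	fmt.Println(a == b) // true: both are the zero value
}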

View file

@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"go.opentelemetry.io/otel/attribute"
@ -56,7 +57,8 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.Error(err))
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}

View file

@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -68,7 +69,8 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.Error(err),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
}
@ -113,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
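The get path above is decompress-then-unmarshal, with each step wrapped for context. A self-contained sketch of the same shape, using stdlib gzip and a plain string as stand-ins for the engine's compression layer and the SDK object type (neither is shown in this diff):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// getObject mirrors the hunk's shape: decompress raw bytes, then decode them.
func getObject(raw []byte) (string, error) {
	zr, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		return "", fmt.Errorf("decompress object data: %w", err)
	}
	defer zr.Close()
	data, err := io.ReadAll(zr)
	if err != nil {
		return "", fmt.Errorf("decompress object data: %w", err)
	}
	// The real code unmarshals an SDK object here; the stand-in type needs no decoding.
	return string(data), nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("payload"))
	zw.Close()
	obj, err := getObject(buf.Bytes())
	fmt.Println(obj, err) // payload <nil>
}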

View file

@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -70,7 +71,8 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if !outOfBounds && !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.Error(err))
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if outOfBounds {
return true, err
@ -128,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
from := prm.Range.GetOffset()

View file

@ -44,12 +44,12 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
if prm.IgnoreErrors {
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", elem.Address()),
zap.Error(err),
zap.String("err", err.Error()),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return nil
}
return fmt.Errorf("decompress object data: %w", err)
return fmt.Errorf("could not decompress object data: %w", err)
}
if prm.Handler != nil {
@ -77,12 +77,12 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
if err != nil {
if ignoreErrors {
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Error(err),
zap.String("err", err.Error()),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return false, nil
}
return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
defer shBlz.Close(ctx)
@ -249,12 +249,6 @@ func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Addres
}
func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
default:
}
sysPath := filepath.Join(b.rootPath, path)
entries, err := os.ReadDir(sysPath)
if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
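The select on ctx.Done() shown on one side of this hunk is the standard non-blocking cancellation check for recursive walks: test the context at each level, otherwise descend. A minimal illustration:

package main

import (
	"context"
	"fmt"
	"time"
)

// walk checks for cancellation before each descent, the same pattern
// as the select in iterateSordedDBPathsInternal above.
func walk(ctx context.Context, depth int) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	if depth == 0 {
		return nil
	}
	return walk(ctx, depth-1)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond)
	fmt.Println(walk(ctx, 10)) // context deadline exceeded
}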

View file

@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
)...)
if err := blz.Open(ctx); err != nil {
return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
}
if err := blz.Init(ctx); err != nil {
return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
}
b.refCount++
@ -97,7 +97,7 @@ func (b *sharedDB) Close(ctx context.Context) {
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.Error(err),
zap.String("error", err.Error()),
)
}
b.blcza = nil
@ -125,9 +125,9 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.Error(err),
zap.String("error", err.Error()),
)
return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
}
b.refCount = 0
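sharedDB hands out a single underlying Blobovnicza behind a reference count: Open lazily opens and increments, Close decrements and releases at zero. A condensed sketch of the pattern with placeholder types; locking and error handling are simplified relative to the real code:

package main

import (
	"fmt"
	"sync"
)

// refDB: many logical opens share one physical handle,
// freed only when the last user closes it.
type refDB struct {
	mu       sync.Mutex
	refCount int
	handle   *struct{} // stand-in for *blobovnicza.Blobovnicza
}

func (d *refDB) Open() *struct{} {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.handle == nil {
		d.handle = &struct{}{} // real code opens and inits the DB here
	}
	d.refCount++
	return d.handle
}

func (d *refDB) Close() {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.refCount--; d.refCount == 0 {
		d.handle = nil // real code closes the underlying file
	}
}

func main() {
	var d refDB
	d.Open()
	d.Open()
	d.Close()
	fmt.Println(d.handle != nil) // true: one user remains
}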

View file

@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@ -82,14 +83,16 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
zap.Error(err))
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return false, nil
}
if active == nil {
i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false, nil
}
defer active.Close(ctx)
@ -103,7 +106,8 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
} else {
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.SystemPath()),
zap.Error(err))
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if errors.Is(err, blobovnicza.ErrNoSpace) {
i.AllFull = true

View file

@ -74,7 +74,7 @@ func (b *BlobStor) Close(ctx context.Context) error {
for i := range b.storage {
err := b.storage[i].Storage.Close(ctx)
if err != nil {
b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}
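Close above logs per-substorage failures but keeps iterating, returning only the first error so shutdown still reports that it was not clean. The pattern in isolation:

package main

import (
	"errors"
	"fmt"
)

type closer func() error

// closeAll mirrors the hunk: log-and-continue on failure,
// return the first error seen.
func closeAll(cs []closer) error {
	var firstErr error
	for _, c := range cs {
		if err := c(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	err := closeAll([]closer{
		func() error { return errors.New("substorage 0: busy") },
		func() error { return nil },
	})
	fmt.Println(err) // substorage 0: busy
}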

View file

@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@ -74,7 +75,8 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
for _, err := range errors[:len(errors)-1] {
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
zap.Error(err))
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return common.ExistsRes{}, errors[len(errors)-1]

View file

@ -153,7 +153,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
if err != nil {
if prm.IgnoreErrors {
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Error(err),
zap.String("err", err.Error()),
zap.String("directory_path", dirPath))
return nil
}
@ -202,7 +202,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
if prm.IgnoreErrors {
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", addr),
zap.Error(err),
zap.String("err", err.Error()),
zap.String("path", path))
continue
}
@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
},
)
if err != nil {
return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
}
return count, size, nil
@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
},
)
if err != nil {
return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
}
success = true
return result, nil
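countFiles and ObjectsCount both walk the tree and tally entries. A standalone equivalent with filepath.WalkDir, reusing the diff's error-wrapping phrasing; the real walk callback and its filters are only partially shown above:

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// countFiles tallies regular files and their cumulative size under root,
// the same shape as FSTree.countFiles in the hunk above.
func countFiles(root string) (count, size uint64, err error) {
	err = filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		count++
		size += uint64(info.Size())
		return nil
	})
	if err != nil {
		return 0, 0, fmt.Errorf("walk through %s directory: %w", root, err)
	}
	return count, size, nil
}

func main() {
	c, s, err := countFiles(os.TempDir())
	fmt.Println(c, s, err)
}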

View file

@ -136,6 +136,6 @@ func (w *genericWriter) removeWithCounter(p string, size uint64) error {
if err := os.Remove(p); err != nil {
return err
}
w.fileCounter.Dec(size)
w.fileCounter.Dec(uint64(size))
return nil
}

View file

@ -69,13 +69,10 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if err != nil {
return err
}
written := 0
tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10)
n, err := unix.Write(fd, data)
for err == nil {
written += n
if written == len(data) {
if err == nil {
if n == len(data) {
err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
if err == nil {
w.fileCounter.Inc(uint64(len(data)))
@ -83,23 +80,9 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if errors.Is(err, unix.EEXIST) {
err = nil
}
break
} else {
err = errors.New("incomplete write")
}
// From man 2 write:
// https://www.man7.org/linux/man-pages/man2/write.2.html
//
// Note that a successful write() may transfer fewer than count
// bytes. Such partial writes can occur for various reasons; for
// example, because there was insufficient space on the disk device
// to write all of the requested bytes, or because a blocked write()
// to a socket, pipe, or similar was interrupted by a signal handler
// after it had transferred some, but before it had transferred all
// of the requested bytes. In the event of a partial write, the
// caller can make another write() call to transfer the remaining
// bytes. The subsequent call will either transfer further bytes or
// may result in an error (e.g., if the disk is now full).
n, err = unix.Write(fd, data[written:])
}
errClose := unix.Close(fd)
if err != nil {
@ -131,7 +114,7 @@ func (w *linuxWriter) removeFile(p string, size uint64) error {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if err == nil {
w.fileCounter.Dec(size)
w.fileCounter.Dec(uint64(size))
}
return err
}
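The retry loop shown on one side of this hunk resumes after short writes: as the quoted man-page comment explains, a successful raw write(2) may transfer fewer bytes than requested without returning an error, unlike Go's io.Writer contract. A condensed version of that loop over unix.Write:

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// writeAll keeps writing until all bytes are transferred or an error occurs.
func writeAll(fd int, data []byte) error {
	written := 0
	for written < len(data) {
		n, err := unix.Write(fd, data[written:])
		if err != nil {
			return err
		}
		if n == 0 {
			return errors.New("incomplete write")
		}
		written += n
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println(writeAll(int(f.Fd()), []byte("payload"))) // <nil>
}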

View file

@ -1,42 +0,0 @@
//go:build linux && integration
package fstree
import (
"context"
"errors"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
"golang.org/x/sys/unix"
)
func TestENOSPC(t *testing.T) {
dir, err := os.MkdirTemp(t.TempDir(), "ramdisk")
require.NoError(t, err)
f, err := os.CreateTemp(t.TempDir(), "ramdisk_*")
require.NoError(t, err)
err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M")
if errors.Is(err, unix.EPERM) {
t.Skipf("skip size tests: no permission to mount: %v", err)
return
}
require.NoError(t, err)
defer func() {
require.NoError(t, unix.Unmount(dir, 0))
}()
fst := New(WithPath(dir), WithDepth(1))
require.NoError(t, fst.Open(mode.ComponentReadWrite))
require.NoError(t, fst.Init())
_, err = fst.Put(context.Background(), common.PutPrm{
RawData: make([]byte, 10<<20),
})
require.ErrorIs(t, err, common.ErrNoSpace)
}

View file

@ -45,7 +45,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("storage_path", b.storage[i].Storage.Path()),
zap.String("storage_type", b.storage[i].Storage.Type()),
zap.Error(err))
zap.String("err", err.Error()))
continue
}
return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)

View file

@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
// Decompress the data.
var err error
if data, err = s.compression.Decompress(data); err != nil {
return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// Unmarshal the SDK object.
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common
elem := common.IterationElement{
ObjectData: v,
}
if err := elem.Address.DecodeString(k); err != nil {
if err := elem.Address.DecodeString(string(k)); err != nil {
if req.IgnoreErrors {
continue
}
return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err))
return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err))
}
var err error
if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil {

View file

@ -27,7 +27,7 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
}
}
if err != nil {
return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
}
b.mode = m

View file

@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
// marshal object
data, err := prm.Object.Marshal()
if err != nil {
return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
}
prm.RawData = data
}

View file

@ -48,8 +48,8 @@ func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm)
defer elapsed("ContainerSize", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
res = e.containerSize(ctx, prm)
return nil
res, err = e.containerSize(ctx, prm)
return err
})
return
@ -69,7 +69,7 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er
return res.Size(), nil
}
func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes) {
func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) {
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
var csPrm shard.ContainerSizePrm
csPrm.SetContainerID(prm.cnr)
@ -96,8 +96,8 @@ func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm)
defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
res = e.listContainers(ctx)
return nil
res, err = e.listContainers(ctx)
return err
})
return
@ -115,7 +115,7 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
return res.Containers(), nil
}
func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
uniqueIDs := make(map[string]cid.ID)
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
@ -142,5 +142,5 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
return ListContainersRes{
containers: result,
}
}, nil
}
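The ContainerSize/ListContainers change threads the inner call's result and error through the execIfNotBlocked guard via the method's named return values, instead of discarding the error inside the closure. The shape in isolation, with the guard simplified (no locking):

package main

import (
	"errors"
	"fmt"
)

type engine struct{ blocked error }

// execIfNotBlocked runs op only when the engine is not blocked,
// the same guard shape as in the hunk above.
func (e *engine) execIfNotBlocked(op func() error) error {
	if e.blocked != nil {
		return e.blocked
	}
	return op()
}

func (e *engine) listContainers() ([]string, error) {
	return nil, errors.New("shard unavailable")
}

// ListContainers assigns both results inside the closure, so the inner
// error reaches the caller instead of being dropped.
func (e *engine) ListContainers() (res []string, err error) {
	err = e.execIfNotBlocked(func() error {
		res, err = e.listContainers()
		return err
	})
	return
}

func main() {
	var e engine
	_, err := e.ListContainers()
	fmt.Println(err) // shard unavailable
}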

View file

@ -95,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
err := eg.Wait()
close(errCh)
if err != nil {
return fmt.Errorf("initialize shards: %w", err)
return fmt.Errorf("failed to initialize shards: %w", err)
}
for res := range errCh {
@ -117,7 +117,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
continue
}
return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
}
}
@ -167,7 +167,7 @@ func (e *StorageEngine) close(ctx context.Context, releasePools bool) error {
if err := sh.Close(ctx); err != nil {
e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
zap.String("id", id),
zap.Error(err),
zap.String("error", err.Error()),
)
}
}
@ -320,7 +320,7 @@ loop:
for _, newID := range shardsToAdd {
sh, err := e.createShard(ctx, rcfg.shards[newID])
if err != nil {
return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
}
idStr := sh.ID().String()
@ -331,13 +331,13 @@ loop:
}
if err != nil {
_ = sh.Close(ctx)
return fmt.Errorf("init %s shard: %w", idStr, err)
return fmt.Errorf("could not init %s shard: %w", idStr, err)
}
err = e.addShard(sh)
if err != nil {
_ = sh.Close(ctx)
return fmt.Errorf("add %s shard: %w", idStr, err)
return fmt.Errorf("could not add %s shard: %w", idStr, err)
}
e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))

Some files were not shown because too many files have changed in this diff.