Compare commits

..

1 commit

SHA1: 09c66fc93c
Message: WIP cli: Allow to upload object parts in parallel
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
Date: 2024-11-29 15:46:46 +03:00
353 changed files with 3724 additions and 4134 deletions

View file

@@ -1,28 +0,0 @@
-name: OCI image
-on:
-  push:
-  workflow_dispatch:
-jobs:
-  image:
-    name: Build container images
-    runs-on: docker
-    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
-    steps:
-      - name: Clone git repo
-        uses: actions/checkout@v3
-      - name: Build OCI image
-        run: make images
-      - name: Push image to OCI registry
-        run: |
-          echo "$REGISTRY_PASSWORD" \
-            | docker login --username truecloudlab --password-stdin git.frostfs.info
-          make push-images
-        if: >-
-          startsWith(github.ref, 'refs/tags/v') &&
-          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
-        env:
-          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

View file

@@ -18,7 +18,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23.6'
+          go-version: '1.23'
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@@ -89,7 +89,5 @@ linters:
    - protogetter
    - intrange
    - tenv
-   - unconvert
-   - unparam
  disable-all: true
  fast: false

View file

@@ -1,3 +0,0 @@
-.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
-.forgejo/.* @potyarkin
-Makefile @potyarkin

View file

@@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.62.2
+LINT_VERSION ?= 1.62.0
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
@@ -139,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
 # Build dirty local Docker images
 dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
-# Push FrostFS components' docker image to the registry
-push-image-%:
-    @echo "⇒ Publish FrostFS $* docker image "
-    @docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
 # Run `make %` in Golang container
 docker/%:
     docker run --rm -t \
@@ -279,12 +270,10 @@ env-up: all
         echo "Frostfs contracts not found"; exit 1; \
     fi
     ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
-    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
-        --storage-wallet ./dev/storage/wallet01.json \
-        --storage-wallet ./dev/storage/wallet02.json \
-        --storage-wallet ./dev/storage/wallet03.json \
-        --storage-wallet ./dev/storage/wallet04.json
+    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
+    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
+    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
+    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
     @if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
         make locode-download; \
     fi

View file

@@ -28,7 +28,6 @@ const (
 var (
     errNoPathsFound          = errors.New("no metabase paths found")
     errNoMorphEndpointsFound = errors.New("no morph endpoints found")
-    errUpgradeFailed         = errors.New("upgrade failed")
 )

 var UpgradeCmd = &cobra.Command{
@@ -92,20 +91,15 @@ func upgrade(cmd *cobra.Command, _ []string) error {
     if err := eg.Wait(); err != nil {
         return err
     }
-    allSuccess := true
     for mb, ok := range result {
         if ok {
             cmd.Println(mb, ": success")
         } else {
             cmd.Println(mb, ": failed")
-            allSuccess = false
         }
     }
-    if allSuccess {
     return nil
-    }
-    return errUpgradeFailed
 }

 func getMetabasePaths(appCfg *config.Config) ([]string, error) {
     var paths []string
@@ -141,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
     if err != nil {
         return nil, fmt.Errorf("resolve container contract hash: %w", err)
     }
-    cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
+    cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
     if err != nil {
         return nil, fmt.Errorf("create morph container client: %w", err)
     }

View file

@@ -253,7 +253,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
     reader := frostfsidrpclient.NewReader(inv, hash)
     sessionID, it, err := reader.ListNamespaces()
     commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-    items, err := readIterator(inv, &it, sessionID)
+    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
     commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)

     namespaces, err := frostfsidclient.ParseNamespaces(items)
@@ -305,7 +305,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
     sessionID, it, err := reader.ListNamespaceSubjects(ns)
     commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-    subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
+    subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
     commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)

     sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
@@ -319,7 +319,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
     sessionID, it, err := reader.ListSubjects()
     commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
-    items, err := readIterator(inv, &it, sessionID)
+    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
     commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)

     subj, err := frostfsidclient.ParseSubject(items)
@@ -365,7 +365,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
     sessionID, it, err := reader.ListGroups(ns)
     commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-    items, err := readIterator(inv, &it, sessionID)
+    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
     commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
     groups, err := frostfsidclient.ParseGroups(items)
     commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@@ -415,7 +415,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
     sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
     commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
-    items, err := readIterator(inv, &it, sessionID)
+    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
     commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)

     subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@@ -492,17 +492,17 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
     return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
 }

-func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
+func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
     var shouldStop bool
     res := make([]stackitem.Item, 0)
     for !shouldStop {
-        items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
+        items, err := inv.TraverseIterator(sessionID, iter, batchSize)
         if err != nil {
             return nil, err
         }
         res = append(res, items...)
-        shouldStop = len(items) < iteratorBatchSize
+        shouldStop = len(items) < batchSize
     }

     return res, nil

View file

@@ -12,6 +12,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
     "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+    "github.com/nspcc-dev/neo-go/pkg/encoding/address"
     "github.com/nspcc-dev/neo-go/pkg/io"
     "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
     "github.com/nspcc-dev/neo-go/pkg/smartcontract"
@@ -140,12 +141,42 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
 }

 func generateStorageCreds(cmd *cobra.Command, _ []string) error {
-    walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
-    w, err := wallet.NewWallet(walletPath)
-    if err != nil {
-        return fmt.Errorf("create wallet: %w", err)
-    }
+    return refillGas(cmd, storageGasConfigFlag, true)
 }

+func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
+    // storage wallet path is not part of the config
+    storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
+    // wallet address is not part of the config
+    walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
+
+    var gasReceiver util.Uint160
+    if len(walletAddress) != 0 {
+        gasReceiver, err = address.StringToUint160(walletAddress)
+        if err != nil {
+            return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
+        }
+    } else {
+        if storageWalletPath == "" {
+            return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
+        }
+        var w *wallet.Wallet
+        if createWallet {
+            w, err = wallet.NewWallet(storageWalletPath)
+        } else {
+            w, err = wallet.NewWalletFromFile(storageWalletPath)
+        }
+        if err != nil {
+            return fmt.Errorf("can't create wallet: %w", err)
+        }
+        if createWallet {
+            var password string
             label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
             password, err := config.GetStoragePassword(viper.GetViper(), label)
             if err != nil {
@@ -159,10 +190,11 @@ func generateStorageCreds(cmd *cobra.Command, _ []string) error {
             if err := w.CreateAccount(label, password); err != nil {
                 return fmt.Errorf("can't create account: %w", err)
             }
-    return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
-}
+        }

-func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
+        gasReceiver = w.Accounts[0].Contract.ScriptHash()
+    }

     gasStr := viper.GetString(gasFlag)
     gasAmount, err := helper.ParseGASAmount(gasStr)
@@ -176,11 +208,9 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160)
     }

     bw := io.NewBufBinWriter()
-    for _, gasReceiver := range gasReceivers {
-        emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
-            wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
-        emit.Opcodes(bw.BinWriter, opcode.ASSERT)
-    }
+    emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
+        wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
+    emit.Opcodes(bw.BinWriter, opcode.ASSERT)
     if bw.Err != nil {
         return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
     }

View file

@ -1,12 +1,7 @@
package generate package generate
import ( import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
@ -38,27 +33,7 @@ var (
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
}, },
RunE: func(cmd *cobra.Command, _ []string) error { RunE: func(cmd *cobra.Command, _ []string) error {
storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag) return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
var gasReceivers []util.Uint160
for _, walletAddress := range walletAddresses {
addr, err := address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
gasReceivers = append(gasReceivers, addr)
}
for _, storageWalletPath := range storageWalletPaths {
w, err := wallet.NewWalletFromFile(storageWalletPath)
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
}
return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
}, },
} }
GenerateAlphabetCmd = &cobra.Command{ GenerateAlphabetCmd = &cobra.Command{
@ -75,10 +50,10 @@ var (
func initRefillGasCmd() { func initRefillGasCmd() {
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet") RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet") RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer") RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag) RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
} }
func initGenerateStorageCmd() { func initGenerateStorageCmd() {

View file

@@ -4,6 +4,7 @@ import (
     "errors"
     "fmt"
     "math/big"
+    "strconv"

     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
     }

     accHash := w.GetChangeAddress()
-    addr, _ := cmd.Flags().GetString(walletAccountFlag)
-    if addr != "" {
+    if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
         accHash, err = address.StringToUint160(addr)
         if err != nil {
             return fmt.Errorf("invalid address: %s", addr)
@@ -73,10 +73,17 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
         return err
     }

-    till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
-    if till <= 0 {
+    till := int64(defaultNotaryDepositLifetime)
+    tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
+    if err != nil {
+        return err
+    }
+    if tillStr != "" {
+        till, err = strconv.ParseInt(tillStr, 10, 64)
+        if err != nil || till <= 0 {
             return errInvalidNotaryDepositLifetime
         }
+    }

     return transferGas(cmd, acc, accHash, gasAmount, till)
 }

View file

@@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
     DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
     DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
     DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
-    DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
+    DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
 }

 func init() {

View file

@@ -20,32 +20,23 @@ const (
     accountAddressFlag = "account"
 )

-func parseAddresses(cmd *cobra.Command) []util.Uint160 {
-    var addrs []util.Uint160
-    accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
-    for _, acc := range accs {
-        addr, err := address.StringToUint160(acc)
-        commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
-        addrs = append(addrs, addr)
-    }
-    return addrs
-}
-
 func addProxyAccount(cmd *cobra.Command, _ []string) {
-    addrs := parseAddresses(cmd)
-    err := processAccount(cmd, addrs, "addAccount")
+    acc, _ := cmd.Flags().GetString(accountAddressFlag)
+    addr, err := address.StringToUint160(acc)
+    commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
+    err = processAccount(cmd, addr, "addAccount")
     commonCmd.ExitOnErr(cmd, "processing error: %w", err)
 }

 func removeProxyAccount(cmd *cobra.Command, _ []string) {
-    addrs := parseAddresses(cmd)
-    err := processAccount(cmd, addrs, "removeAccount")
+    acc, _ := cmd.Flags().GetString(accountAddressFlag)
+    addr, err := address.StringToUint160(acc)
+    commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
+    err = processAccount(cmd, addr, "removeAccount")
     commonCmd.ExitOnErr(cmd, "processing error: %w", err)
 }

-func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
+func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
     wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
     if err != nil {
         return fmt.Errorf("can't initialize context: %w", err)
@@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err
     }

     bw := io.NewBufBinWriter()
-    for _, addr := range addrs {
-        emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
-    }
+    emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)

     if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
         return err

View file

@@ -29,15 +29,13 @@ var (
 func initProxyAddAccount() {
     AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-    AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
-    _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
+    AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
     AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }

 func initProxyRemoveAccount() {
     RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-    RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
-    _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
+    RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
     RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }

View file

@@ -11,7 +11,6 @@ import (
     "net/url"
     "os"
     "path/filepath"
-    "slices"
     "strconv"
     "strings"
     "text/template"
@@ -106,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
         fatalOnErr(errors.New("can't find account in wallet"))
     }

-    c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
+    c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
     fatalOnErr(err)

     err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
@@ -411,7 +410,8 @@ func initClient(rpc []string) *rpcclient.Client {
     var c *rpcclient.Client
     var err error

-    shuffled := slices.Clone(rpc)
+    shuffled := make([]string, len(rpc))
+    copy(shuffled, rpc)
     rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })

     for _, endpoint := range shuffled {

View file

@@ -4,14 +4,17 @@ import (
     "bytes"
     "cmp"
     "context"
+    "crypto/ecdsa"
     "errors"
     "fmt"
     "io"
     "os"
     "slices"
+    "sort"
     "strings"

     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
+    buffPool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
     containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -19,7 +22,9 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
     objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+    "golang.org/x/sync/errgroup"
 )

 var errMissingHeaderInResponse = errors.New("missing header in response")
@@ -77,31 +82,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
 // SortedIDList returns sorted list of identifiers of user's containers.
 func (x ListContainersRes) SortedIDList() []cid.ID {
     list := x.cliRes.Containers()
-    slices.SortFunc(list, func(lhs, rhs cid.ID) int {
-        return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
+    sort.Slice(list, func(i, j int) bool {
+        lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
+        return strings.Compare(lhs, rhs) < 0
     })
     return list
 }

-func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
-    cliPrm := &client.PrmContainerListStream{
-        XHeaders: prm.XHeaders,
-        OwnerID:  prm.OwnerID,
-        Session:  prm.Session,
-    }
-    rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
-    if err != nil {
-        return fmt.Errorf("init container list: %w", err)
-    }
-    err = rdr.Iterate(processCnr)
-    if err != nil {
-        return fmt.Errorf("read container list: %w", err)
-    }
-    return
-}
-
 // PutContainerPrm groups parameters of PutContainer operation.
 type PutContainerPrm struct {
     Client *client.Client
@@ -466,6 +453,119 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
     }, nil
 }

+func PutSingle(ctx context.Context, pk *ecdsa.PrivateKey, threadCount int, maxSize uint64, prm PutObjectPrm) (*PutObjectRes, error) {
+    res, err := prm.cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
+    if err != nil {
+        return nil, err
+    }
+
+    if maxSize == 0 {
+        maxSize = res.Info().MaxObjectSize()
+    }
+    if threadCount <= 0 {
+        threadCount = 1
+    }
+
+    it := &internalTarget{client: prm.cli, prm: client.PrmObjectPutSingle{
+        XHeaders:     prm.xHeaders,
+        BearerToken:  prm.bearerToken,
+        Local:        prm.local,
+        CopiesNumber: prm.copyNum,
+        Key:          pk,
+    }}
+    it.eg.SetLimit(threadCount)
+    defer it.eg.Wait()
+
+    sz := prm.hdr.PayloadSize()
+    pool := buffPool.NewBufferPool(uint32(res.Info().MaxObjectSize()))
+    wrt := transformer.NewPayloadSizeLimiter(transformer.Params{
+        Key: pk,
+        NextTargetInit: func() transformer.ObjectWriter {
+            return it
+        },
+        NetworkState:           epochSource(res.Info().CurrentEpoch()),
+        MaxSize:                maxSize,
+        WithoutHomomorphicHash: res.Info().HomomorphicHashingDisabled(),
+        SizeHint:               sz,
+        Pool:                   &pool,
+    })
+
+    if err := wrt.WriteHeader(ctx, prm.hdr); err != nil {
+        return nil, err
+    }
+    if prm.headerCallback != nil {
+        prm.headerCallback()
+    }
+
+    if data := prm.hdr.Payload(); len(data) > 0 {
+        if prm.rdr != nil {
+            prm.rdr = io.MultiReader(bytes.NewReader(data), prm.rdr)
+        } else {
+            prm.rdr = bytes.NewReader(data)
+            sz = uint64(len(data))
+        }
+    }
+
+    if prm.rdr != nil {
+        const defaultBufferSizePut = 4 << 20 // Maximum chunk size is 3 MiB in the SDK.
+        if sz == 0 || sz > defaultBufferSizePut {
+            sz = defaultBufferSizePut
+        }
+
+        buf := make([]byte, sz)
+
+        var n int
+        for {
+            n, err = prm.rdr.Read(buf)
+            if n > 0 {
+                if _, err := wrt.Write(ctx, buf[:n]); err != nil {
+                    return nil, err
+                }
+                continue
+            }
+            if errors.Is(err, io.EOF) {
+                break
+            }
+            return nil, fmt.Errorf("read payload: %w", err)
+        }
+    }
+
+    if err := it.eg.Wait(); err != nil {
+        return nil, err
+    }
+
+    cliRes, err := wrt.Close(ctx)
+    if err != nil { // here err already carries both status and client errors
+        return nil, fmt.Errorf("client failure: %w", err)
+    }
+
+    return &PutObjectRes{id: cliRes.SelfID}, nil
+}
+
+type internalTarget struct {
+    eg     errgroup.Group
+    client *client.Client
+    pool   *buffPool.BufferPool
+    prm    client.PrmObjectPutSingle
+}
+
+func (it *internalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+    it.eg.Go(func() error {
+        defer it.pool.Put(&buffPool.Buffer{Data: obj.Payload()})
+        prm := it.prm
+        prm.Object = obj
+        _, err := it.client.ObjectPutSingle(ctx, prm)
+        return err
+    })
+    return nil
+}
+
 // DeleteObjectPrm groups parameters of DeleteObject operation.
 type DeleteObjectPrm struct {
     commonObjectPrm

View file

@@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
     outputPath, _ := cmd.Flags().GetString(outputFlag)
     if outputPath != "" {
-        err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
+        err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
         commonCmd.ExitOnErr(cmd, "dump error: %w", err)
     } else {
         fmt.Print("\n")

View file

@@ -6,11 +6,8 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
     commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
     containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
     "github.com/spf13/cobra"
-    "google.golang.org/grpc/codes"
-    "google.golang.org/grpc/status"
 )

 // flags of list command.
@@ -54,52 +51,34 @@ var listContainersCmd = &cobra.Command{
         var prm internalclient.ListContainersPrm
         prm.SetClient(cli)
-        prm.OwnerID = idUser
+        prm.Account = idUser
+
+        res, err := internalclient.ListContainers(cmd.Context(), prm)
+        commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

         prmGet := internalclient.GetContainerPrm{
             Client: cli,
         }

-        var containerIDs []cid.ID
-        err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
-            printContainer(cmd, prmGet, id)
-            return false
-        })
-        if err == nil {
-            return
-        }
-        if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
-            res, err := internalclient.ListContainers(cmd.Context(), prm)
-            commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-            containerIDs = res.SortedIDList()
-        } else {
-            commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-        }
+        containerIDs := res.SortedIDList()
         for _, cnrID := range containerIDs {
-            printContainer(cmd, prmGet, cnrID)
-        }
-    },
-}
-
-func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
             if flagVarListName == "" && !flagVarListPrintAttr {
-                cmd.Println(id.String())
-                return
+                cmd.Println(cnrID.String())
+                continue
             }

-            prmGet.ClientParams.ContainerID = &id
+            prmGet.ClientParams.ContainerID = &cnrID
             res, err := internalclient.GetContainer(cmd.Context(), prmGet)
             if err != nil {
                 cmd.Printf(" failed to read attributes: %v\n", err)
-                return
+                continue
             }

             cnr := res.Container()
             if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
-                return
+                continue
             }
-            cmd.Println(id.String())
+            cmd.Println(cnrID.String())

             if flagVarListPrintAttr {
                 cnr.IterateUserAttributes(func(key, val string) {
@@ -107,6 +86,8 @@ func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, i
                 })
             }
         }
+    },
+}

 func initContainerListContainersCmd() {
     commonflags.Init(listContainersCmd)

View file

@@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
     nodes map[string]netmap.NodeInfo
 }

-func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
+func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
     return &policyPlaygroundREPL{
         cmd:   cmd,
         nodes: map[string]netmap.NodeInfo{},
-    }
+    }, nil
 }

 func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@@ -246,7 +246,8 @@ var policyPlaygroundCmd = &cobra.Command{
     Long: `A REPL for testing placement policies.
 If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
     Run: func(cmd *cobra.Command, _ []string) {
-        repl := newPolicyPlaygroundREPL(cmd)
+        repl, err := newPolicyPlaygroundREPL(cmd)
+        commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
         commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
     },
 }

View file

@@ -0,0 +1,56 @@
+package control
+
+import (
+    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+    "github.com/spf13/cobra"
+)
+
+const ignoreErrorsFlag = "no-errors"
+
+var evacuateShardCmd = &cobra.Command{
+    Use:        "evacuate",
+    Short:      "Evacuate objects from shard",
+    Long:       "Evacuate objects from shard to other shards",
+    Run:        evacuateShard,
+    Deprecated: "use frostfs-cli control shards evacuation start",
+}
+
+func evacuateShard(cmd *cobra.Command, _ []string) {
+    pk := key.Get(cmd)
+
+    req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
+    req.Body.Shard_ID = getShardIDList(cmd)
+    req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
+
+    signRequest(cmd, pk, req)
+
+    cli := getClient(cmd, pk)
+
+    var resp *control.EvacuateShardResponse
+    var err error
+    err = cli.ExecRaw(func(client *client.Client) error {
+        resp, err = control.EvacuateShard(client, req)
+        return err
+    })
+    commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+    cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
+
+    verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+    cmd.Println("Shard has successfully been evacuated.")
+}
+
+func initControlEvacuateShardCmd() {
+    initControlFlags(evacuateShardCmd)
+
+    flags := evacuateShardCmd.Flags()
+    flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
+    flags.Bool(shardAllFlag, false, "Process all shards")
+    flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
+
+    evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
+}

View file

@@ -21,7 +21,6 @@ const (
     noProgressFlag           = "no-progress"
     scopeFlag                = "scope"
     repOneOnlyFlag           = "rep-one-only"
-    ignoreErrorsFlag         = "no-errors"
     containerWorkerCountFlag = "container-worker-count"
     objectWorkerCountFlag    = "object-worker-count"

View file

@@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
     var resp *control.GetNetmapStatusResponse
     var err error
     err = cli.ExecRaw(func(client *rawclient.Client) error {
-        resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
+        resp, err = control.GetNetmapStatus(client, req)
         return err
     })
     commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)

View file

@@ -13,6 +13,7 @@ var shardsCmd = &cobra.Command{
 func initControlShardsCmd() {
     shardsCmd.AddCommand(listShardsCmd)
     shardsCmd.AddCommand(setShardModeCmd)
+    shardsCmd.AddCommand(evacuateShardCmd)
     shardsCmd.AddCommand(evacuationShardCmd)
     shardsCmd.AddCommand(flushCacheCmd)
     shardsCmd.AddCommand(doctorCmd)
@@ -22,6 +23,7 @@ func initControlShardsCmd() {
     initControlShardsListCmd()
     initControlSetShardModeCmd()
+    initControlEvacuateShardCmd()
     initControlEvacuationShardCmd()
     initControlFlushCacheCmd()
     initControlDoctorCmd()

View file

@@ -9,6 +9,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
     commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
     "github.com/spf13/cobra"
@@ -41,9 +42,7 @@ func initObjectHashCmd() {
     flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
     _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)

-    flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
-    _ = objectHashCmd.MarkFlagRequired("range")
+    flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
     flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
     flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
 }
@@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
     pk := key.GetOrGenerate(cmd)
     cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)

+    tz := typ == hashTz
+    fullHash := len(ranges) == 0
+    if fullHash {
+        var headPrm internalclient.HeadObjectPrm
+        headPrm.SetClient(cli)
+        Prepare(cmd, &headPrm)
+        headPrm.SetAddress(objAddr)
+
+        // get hash of full payload through HEAD (may be user can do it through dedicated command?)
+        res, err := internalclient.HeadObject(cmd.Context(), headPrm)
+        commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+        var cs checksum.Checksum
+        var csSet bool
+
+        if tz {
+            cs, csSet = res.Header().PayloadHomomorphicHash()
+        } else {
+            cs, csSet = res.Header().PayloadChecksum()
+        }
+
+        if csSet {
+            cmd.Println(hex.EncodeToString(cs.Value()))
+        } else {
+            cmd.Println("Missing checksum in object header.")
+        }
+        return
+    }
+
     var hashPrm internalclient.HashPayloadRangesPrm
     hashPrm.SetClient(cli)
     Prepare(cmd, &hashPrm)
@@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
     hashPrm.SetSalt(salt)
     hashPrm.SetRanges(ranges)

-    if typ == hashTz {
+    if tz {
         hashPrm.TZ()
     }

View file

@@ -1,12 +1,15 @@
 package object

 import (
+    "bytes"
+    "cmp"
     "context"
     "crypto/ecdsa"
     "encoding/hex"
     "encoding/json"
     "errors"
     "fmt"
+    "slices"
     "sync"

     internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@@ -320,7 +323,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
     }
     placementBuilder := placement.NewNetworkMapBuilder(netmap)
     for _, object := range objects {
-        placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
+        placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
         commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
         for repIdx, rep := range placement {
             numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
@@ -358,7 +361,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
         placementObjectID = object.ecHeader.parent
     }
     placementBuilder := placement.NewNetworkMapBuilder(netmap)
-    placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
+    placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
     commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)

     for _, vector := range placement {
@@ -504,6 +507,7 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
 }

 func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
+    normilizeObjectNodesResult(objects, result)
     if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
         printObjectNodesAsJSON(cmd, objID, objects, result)
     } else {
@@ -511,6 +515,34 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
     }
 }

+func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
+    slices.SortFunc(objects, func(lhs, rhs phyObject) int {
+        if lhs.ecHeader == nil && rhs.ecHeader == nil {
+            return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
+        }
+        if lhs.ecHeader == nil {
+            return -1
+        }
+        if rhs.ecHeader == nil {
+            return 1
+        }
+        if lhs.ecHeader.parent == rhs.ecHeader.parent {
+            return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
+        }
+        return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
+    })
+    for _, obj := range objects {
+        op := result.placements[obj.objectID]
+        slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
+            return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
+        })
+        slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
+            return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
+        })
+        result.placements[obj.objectID] = op
+    }
+}
+
 func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
     fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))

View file

@@ -46,7 +46,7 @@ func initObjectPatchCmd() {
     flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
     _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)

-    flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
+    flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
     flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
     flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
     flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
@@ -99,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
 }

 func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-    rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
-    if err != nil {
-        return nil, err
+    var rawAttrs []string
+
+    raw := cmd.Flag(newAttrsFlagName).Value.String()
+    if len(raw) != 0 {
+        rawAttrs = strings.Split(raw, ",")
     }

     attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes

View file

@@ -27,6 +27,8 @@ const (
     notificationFlag   = "notify"
     copiesNumberFlag   = "copies-number"
     prepareLocallyFlag = "prepare-locally"
+    threadCountFlag    = "thread-count"
+    maxSizeFlag        = "max-size-mb"
 )

 var putExpiredOn uint64
@@ -50,7 +52,7 @@ func initObjectPutCmd() {
     flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)

-    flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
+    flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
     flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
     flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
     flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@@ -61,6 +63,8 @@ func initObjectPutCmd() {
     flags.Bool(binaryFlag, false, "Deserialize object structure from given file.")

     flags.String(copiesNumberFlag, "", "Number of copies of the object to store within the RPC call")
+    flags.Int(threadCountFlag, 1, "Max objects to put in parallel")
+    flags.Uint64(maxSizeFlag, 0, "Max object size in mebibytes")
 }

 func putObject(cmd *cobra.Command, _ []string) {
@@ -107,6 +111,24 @@ func putObject(cmd *cobra.Command, _ []string) {
     if prepareLocally, _ := cmd.Flags().GetBool(prepareLocallyFlag); prepareLocally {
         prm.SetClient(internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC))
         prm.PrepareLocally()
+
+        Prepare(cmd, &prm)
+        prm.SetHeader(obj)
+        prm.SetPayloadReader(payloadReader)
+
+        copyNum, err := cmd.Flags().GetString(copiesNumberFlag)
+        commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err)
+        prm.SetCopiesNumberByVectors(parseCopyNumber(cmd, copyNum))
+
+        threadCount, _ := cmd.Flags().GetInt(threadCountFlag)
+        maxSize, _ := cmd.Flags().GetUint64(maxSizeFlag)
+        maxSize <<= 20 // In mebibytes.
+
+        res, err := internalclient.PutSingle(cmd.Context(), pk, threadCount, maxSize, prm)
+        commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+        cmd.Printf("[%s] Object successfully stored\n", filename)
+        cmd.Printf(" OID: %s\n CID: %s\n", res.ID(), cnr)
     } else {
         ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
     }
@@ -214,9 +236,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
 }

 func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-    rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
-    if err != nil {
-        return nil, err
+    var rawAttrs []string
+
+    raw := cmd.Flag("attributes").Value.String()
+    if len(raw) != 0 {
+        rawAttrs = strings.Split(raw, ",")
     }

     attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes

View file

@@ -38,7 +38,7 @@ func initObjectRangeCmd() {
     flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
     _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)

-    flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
+    flags.String("range", "", "Range to take data from in the form offset:length")
     flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
     flags.Bool(rawFlag, false, rawFlagDesc)
 }
@@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
 }

 func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
-    vs, err := cmd.Flags().GetStringSlice("range")
-    if len(vs) == 0 || err != nil {
-        return nil, err
+    v := cmd.Flag("range").Value.String()
+    if len(v) == 0 {
+        return nil, nil
     }
+    vs := strings.Split(v, ",")
     rs := make([]objectSDK.Range, len(vs))
     for i := range vs {
         before, after, found := strings.Cut(vs[i], rangeSep)

View file

@@ -9,6 +9,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+    metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
     tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
     "github.com/spf13/cobra"
     "github.com/spf13/viper"
@@ -33,9 +34,11 @@ func _client() (tree.TreeServiceClient, error) {
     opts := []grpc.DialOption{
         grpc.WithChainUnaryInterceptor(
+            metrics.NewUnaryClientInterceptor(),
             tracing.NewUnaryClientInteceptor(),
         ),
         grpc.WithChainStreamInterceptor(
+            metrics.NewStreamClientInterceptor(),
             tracing.NewStreamClientInterceptor(),
         ),
         grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),

View file

@@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
     log.Info(ctx, c.name+" config updated")
     if err := c.shutdown(ctx); err != nil {
         log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-            zap.Error(err),
+            zap.String("error", err.Error()),
         )
     } else {
         c.init(ctx)

View file

@@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
     innerRing.Stop(ctx)
     if err := metricsCmp.shutdown(ctx); err != nil {
         log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-            zap.Error(err),
+            zap.String("error", err.Error()),
         )
     }
     if err := pprofCmp.shutdown(ctx); err != nil {
         log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-            zap.Error(err),
+            zap.String("error", err.Error()),
         )
     }

View file

@@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
     log.Info(ctx, c.name+" config updated")
     if err := c.shutdown(ctx); err != nil {
         log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-            zap.Error(err))
+            zap.String("error", err.Error()))
         return
     }

View file

@@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
     path := parentBucket.Path
     parser := parentBucket.NextParser

-    buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+    buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+    if err != nil {
+        return err
+    }

     for item := range buffer {
         if item.err != nil {
@@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
         }
         bucket := item.val

-        var err error
         bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
         if err != nil {
             return err
@@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
     defer cancel()

     // Check the current bucket's nested buckets if exist
-    bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+    bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+    if err != nil {
+        return false, err
+    }

     for item := range bucketsBuffer {
         if item.err != nil {
@@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
         }
         b := item.val

-        var err error
         b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
         if err != nil {
             return false, err
@@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
     }

     // Check the current bucket's nested records if exist
-    recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+    recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+    if err != nil {
+        return false, err
+    }

     for item := range recordsBuffer {
         if item.err != nil {
@@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
         }
         r := item.val

-        var err error
         r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
         if err != nil {
             return false, err


@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any]( func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T, filter func(key, value []byte) bool, transform func(key, value []byte) T,
) <-chan Item[T] { ) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize) buffer := make(chan Item[T], bufferSize)
go func() { go func() {
@ -77,13 +77,13 @@ func load[T any](
} }
}() }()
return buffer return buffer, nil
} }
func LoadBuckets( func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Bucket] { ) (<-chan Item[*Bucket], error) {
buffer := load( buffer, err := load(
ctx, db, path, bufferSize, ctx, db, path, bufferSize,
func(_, value []byte) bool { func(_, value []byte) bool {
return value == nil return value == nil
@ -98,14 +98,17 @@ func LoadBuckets(
} }
}, },
) )
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer return buffer, nil
} }
func LoadRecords( func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Record] { ) (<-chan Item[*Record], error) {
buffer := load( buffer, err := load(
ctx, db, path, bufferSize, ctx, db, path, bufferSize,
func(_, value []byte) bool { func(_, value []byte) bool {
return value != nil return value != nil
@ -121,8 +124,11 @@ func LoadRecords(
} }
}, },
) )
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer return buffer, nil
} }
// HasBuckets checks if a bucket has nested buckets. It relies on assumption // HasBuckets checks if a bucket has nested buckets. It relies on assumption
@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
buffer := load( buffer, err := load(
ctx, db, path, 1, ctx, db, path, 1,
nil, nil,
func(_, value []byte) []byte { return value }, func(_, value []byte) []byte { return value },
) )
if err != nil {
return false, err
}
x, ok := <-buffer x, ok := <-buffer
if !ok { if !ok {
return false, nil return false, nil
} }
if x.err != nil { if x.err != nil {
return false, x.err return false, err
} }
if x.val != nil { if x.val != nil {
return false, nil return false, err
} }
return true, nil return true, nil
} }
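
The loaders above change from returning just a channel to returning the channel plus a start-up error, while per-item failures still travel inside the channel. A small self-contained sketch of consuming that shape; Item and loadNumbers are invented for the example:

    package main

    import (
        "context"
        "fmt"
    )

    // Item mirrors the shape used above: a value plus a per-item error.
    type Item[T any] struct {
        val T
        err error
    }

    // loadNumbers is a toy analogue of LoadBuckets/LoadRecords: it can fail
    // up front (returned error) or per item (Item.err).
    func loadNumbers(ctx context.Context, n int) (<-chan Item[int], error) {
        if n < 0 {
            return nil, fmt.Errorf("invalid count %d", n)
        }
        buffer := make(chan Item[int], n)
        go func() {
            defer close(buffer)
            for i := 0; i < n; i++ {
                select {
                case <-ctx.Done():
                    buffer <- Item[int]{err: ctx.Err()}
                    return
                case buffer <- Item[int]{val: i}:
                }
            }
        }()
        return buffer, nil
    }

    func main() {
        buffer, err := loadNumbers(context.Background(), 3)
        if err != nil { // start-up failure, as in the new LoadBuckets signature
            fmt.Println("load failed:", err)
            return
        }
        for item := range buffer {
            if item.err != nil { // per-item failure, handled inside the range loop
                fmt.Println("item failed:", item.err)
                break
            }
            fmt.Println("got", item.val)
        }
    }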


@ -62,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx) ctx, v.onUnmount = context.WithCancel(ctx)
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
if err != nil {
return err
}
v.buffer = make(chan *Record, v.ui.loadBufferSize) v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() { go func() {
@ -70,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer { for item := range tempBuffer {
if item.err != nil { if item.err != nil {
v.ui.stopOnError(item.err) v.ui.stopOnError(err)
break break
} }
record := item.val record := item.val
var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value) record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil { if err != nil {
v.ui.stopOnError(err) v.ui.stopOnError(err)


@ -19,7 +19,6 @@ func initAPEManagerService(c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash) c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage, execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
c.cfgMorph.client,
apemanager.WithLogger(c.log)) apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc) sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit) auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)


@ -1,7 +1,6 @@
package main package main
import ( import (
"context"
"sync" "sync"
"time" "time"
@ -17,7 +16,7 @@ import (
"github.com/hashicorp/golang-lru/v2/expirable" "github.com/hashicorp/golang-lru/v2/expirable"
) )
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) type netValueReader[K any, V any] func(K) (V, error)
type valueWithError[V any] struct { type valueWithError[V any] struct {
v V v V
@ -50,7 +49,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// updates the value from the network on cache miss or by TTL. // updates the value from the network on cache miss or by TTL.
// //
// returned value should not be modified. // returned value should not be modified.
func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) { func (c *ttlNetCache[K, V]) get(key K) (V, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -72,7 +71,7 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
return val.v, val.e return val.v, val.e
} }
v, err := c.netRdr(ctx, key) v, err := c.netRdr(key)
c.cache.Add(key, &valueWithError[V]{ c.cache.Add(key, &valueWithError[V]{
v: v, v: v,
@ -136,7 +135,7 @@ func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]
// updates the value from the network on cache miss. // updates the value from the network on cache miss.
// //
// returned value should not be modified. // returned value should not be modified.
func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -149,7 +148,7 @@ func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, e
return val, nil return val, nil
} }
val, err := c.netRdr(ctx, key) val, err := c.netRdr(key)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -167,11 +166,11 @@ type ttlContainerStorage struct {
} }
func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) { lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(ctx, id) return v.Get(id)
}, metrics.NewCacheMetrics("container")) }, metrics.NewCacheMetrics("container"))
lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(ctx, id) return v.DeletionInfo(id)
}, metrics.NewCacheMetrics("container_deletion_info")) }, metrics.NewCacheMetrics("container_deletion_info"))
return ttlContainerStorage{ return ttlContainerStorage{
@ -189,12 +188,12 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache // Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache. // or expired, then it returns value from side chain and updates the cache.
func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) { func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.containerCache.get(ctx, cnr) return s.containerCache.get(cnr)
} }
func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) { func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
return s.delInfoCache.get(ctx, cnr) return s.delInfoCache.get(cnr)
} }
type lruNetmapSource struct { type lruNetmapSource struct {
@ -206,8 +205,8 @@ type lruNetmapSource struct {
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10 const netmapCacheSize = 10
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(ctx, key) return v.GetNetMapByEpoch(key)
}, metrics.NewCacheMetrics("netmap")) }, metrics.NewCacheMetrics("netmap"))
return &lruNetmapSource{ return &lruNetmapSource{
@ -216,16 +215,16 @@ func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
} }
} }
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff) return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
} }
func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, epoch) return s.getNetMapByEpoch(epoch)
} }
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.cache.get(ctx, epoch) val, err := s.cache.get(epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -233,7 +232,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
return val, nil return val, nil
} }
func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil return s.netState.CurrentEpoch(), nil
} }
@ -241,10 +240,7 @@ type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte] *ttlNetCache[struct{}, [][]byte]
} }
func newCachedIRFetcher(f interface { func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
InnerRingKeys(ctx context.Context) ([][]byte, error)
},
) cachedIRFetcher {
const ( const (
irFetcherCacheSize = 1 // we intend to store only one value irFetcherCacheSize = 1 // we intend to store only one value
@ -258,8 +254,8 @@ func newCachedIRFetcher(f interface {
) )
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
func(ctx context.Context, _ struct{}) ([][]byte, error) { func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys(ctx) return f.InnerRingKeys()
}, metrics.NewCacheMetrics("ir_keys"), }, metrics.NewCacheMetrics("ir_keys"),
) )
@ -269,8 +265,8 @@ func newCachedIRFetcher(f interface {
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates // the cache or expired, then it returns keys from side chain and updates
// the cache. // the cache.
func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := f.get(ctx, struct{}{}) val, err := f.get(struct{}{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -293,7 +289,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
} }
} }
func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30 const ttl = time.Second * 30
hit := false hit := false
@ -315,7 +311,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
c.mtx.Lock() c.mtx.Lock()
size = c.lastSize size = c.lastSize
if !c.lastUpdated.After(prevUpdated) { if !c.lastUpdated.After(prevUpdated) {
size = c.src.MaxObjectSize(ctx) size = c.src.MaxObjectSize()
c.lastSize = size c.lastSize = size
c.lastUpdated = time.Now() c.lastUpdated = time.Now()
} }
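
The cache hunk above threads a caller context into every network reader (netValueReader now takes ctx). Below is a hand-rolled sketch of a context-aware TTL cache in the same spirit; it is not the node's actual ttlNetCache, only an illustration of passing ctx through to the fetch:

    package main

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    // netValueReader mirrors the new reader shape: it receives the caller's context.
    type netValueReader[K comparable, V any] func(ctx context.Context, key K) (V, error)

    type entry[V any] struct {
        v       V
        fetched time.Time
    }

    type ttlCache[K comparable, V any] struct {
        mtx    sync.Mutex
        ttl    time.Duration
        items  map[K]entry[V]
        netRdr netValueReader[K, V]
    }

    func newTTLCache[K comparable, V any](ttl time.Duration, r netValueReader[K, V]) *ttlCache[K, V] {
        return &ttlCache[K, V]{ttl: ttl, items: make(map[K]entry[V]), netRdr: r}
    }

    // get serves fresh entries from memory and otherwise falls through to the
    // reader, forwarding ctx so the network call can be cancelled by the caller.
    func (c *ttlCache[K, V]) get(ctx context.Context, key K) (V, error) {
        c.mtx.Lock()
        e, ok := c.items[key]
        c.mtx.Unlock()
        if ok && time.Since(e.fetched) < c.ttl {
            return e.v, nil
        }
        v, err := c.netRdr(ctx, key)
        if err != nil {
            return v, err
        }
        c.mtx.Lock()
        c.items[key] = entry[V]{v: v, fetched: time.Now()}
        c.mtx.Unlock()
        return v, nil
    }

    func main() {
        cache := newTTLCache[string, int](time.Minute, func(_ context.Context, key string) (int, error) {
            return len(key), nil // stand-in for a side-chain lookup
        })
        v, err := cache.get(context.Background(), "epoch")
        fmt.Println(v, err)
    }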


@ -1,7 +1,6 @@
package main package main
import ( import (
"context"
"errors" "errors"
"testing" "testing"
"time" "time"
@ -18,7 +17,7 @@ func TestTTLNetCache(t *testing.T) {
t.Run("Test Add and Get", func(t *testing.T) { t.Run("Test Add and Get", func(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, ti, val) require.Equal(t, ti, val)
}) })
@ -27,7 +26,7 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
time.Sleep(2 * ttlDuration) time.Sleep(2 * ttlDuration)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
@ -36,20 +35,20 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
cache.remove(key) cache.remove(key)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
t.Run("Test Cache Error", func(t *testing.T) { t.Run("Test Cache Error", func(t *testing.T) {
cache.set("error", time.Now(), errors.New("mock error")) cache.set("error", time.Now(), errors.New("mock error"))
_, err := cache.get(context.Background(), "error") _, err := cache.get("error")
require.Error(t, err) require.Error(t, err)
require.Equal(t, "mock error", err.Error()) require.Equal(t, "mock error", err.Error())
}) })
} }
func testNetValueReader(_ context.Context, key string) (time.Time, error) { func testNetValueReader(key string) (time.Time, error) {
if key == "error" { if key == "error" {
return time.Now(), errors.New("mock error") return time.Now(), errors.New("mock error")
} }


@ -493,7 +493,6 @@ type cfg struct {
cfgNetmap cfgNetmap cfgNetmap cfgNetmap
cfgControlService cfgControlService cfgControlService cfgControlService
cfgObject cfgObject cfgObject cfgObject
cfgQoSService cfgQoSService
} }
// ReadCurrentNetMap reads network map which has been cached at the // ReadCurrentNetMap reads network map which has been cached at the
@ -592,6 +591,8 @@ type cfgMorph struct {
client *client.Client client *client.Client
notaryEnabled bool
// TTL of Sidechain cached values. Non-positive value disables caching. // TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration cacheTTL time.Duration
@ -610,7 +611,6 @@ type cfgContainer struct {
parsers map[event.Type]event.NotificationParser parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers workerPool util.WorkerPool // pool for asynchronous handlers
containerBatchSize uint32
} }
type cfgFrostfsID struct { type cfgFrostfsID struct {
@ -699,7 +699,8 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector netState.metrics = c.metricsCollector
logPrm := c.loggerPrm() logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm) log, err := logger.NewLogger(logPrm)
fatalOnErr(err) fatalOnErr(err)
@ -854,7 +855,7 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
func initCfgGRPC() cfgGRPC { func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
return cfgGRPC{ return cfgGRPC{
maxChunkSize: maxChunkSize, maxChunkSize: maxChunkSize,
@ -1059,7 +1060,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh return sh
} }
func (c *cfg) loggerPrm() *logger.Prm { func (c *cfg) loggerPrm() (*logger.Prm, error) {
// check if it has been inited before // check if it has been inited before
if c.dynamicConfiguration.logger == nil { if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm) c.dynamicConfiguration.logger = new(logger.Prm)
@ -1078,7 +1079,7 @@ func (c *cfg) loggerPrm() *logger.Prm {
} }
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger return c.dynamicConfiguration.logger, nil
} }
func (c *cfg) LocalAddress() network.AddressGroup { func (c *cfg) LocalAddress() network.AddressGroup {
@ -1120,7 +1121,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
err := ls.Close(context.WithoutCancel(ctx)) err := ls.Close(context.WithoutCancel(ctx))
if err != nil { if err != nil {
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure, c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.Error(err), zap.String("error", err.Error()),
) )
} else { } else {
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
@ -1147,7 +1148,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash) c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg) cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
} }
@ -1206,11 +1207,11 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
} }
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
ni, err := c.netmapLocalNodeState(ctx, epoch) ni, err := c.netmapLocalNodeState(epoch)
if err != nil { if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch), zap.Uint64("epoch", epoch),
zap.Error(err)) zap.String("error", err.Error()))
return return
} }
@ -1220,9 +1221,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration. // with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil. // The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error { func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo ni := c.cfgNodeInfo.localInfo
ni.SetStatus(state) stateSetter(&ni)
prm := nmClient.AddPeerPrm{} prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni) prm.SetNodeInfo(ni)
@ -1232,7 +1233,9 @@ func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) er
// bootstrapOnline calls cfg.bootstrapWithState with "online" state. // bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(ctx context.Context, c *cfg) error { func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, netmap.Online) return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
} }
// bootstrap calls bootstrapWithState with: // bootstrap calls bootstrapWithState with:
@ -1243,7 +1246,9 @@ func (c *cfg) bootstrap(ctx context.Context) error {
st := c.cfgNetmap.state.controlNetmapStatus() st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE { if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState) c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, netmap.Maintenance) return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
} }
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState, c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
@ -1334,7 +1339,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// Logger // Logger
logPrm := c.loggerPrm() logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm) components := c.getComponents(ctx, logPrm)
@ -1457,7 +1466,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider { func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
return container.NewInfoProvider(func() (container.Source, error) { return container.NewInfoProvider(func() (container.Source, error) {
c.initMorphComponents(ctx) c.initMorphComponents(ctx)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
if err != nil { if err != nil {
return nil, err return nil, err
} }
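
Among the changes above, bootstrapWithState switches between taking a setter callback and taking the target state directly. A toy illustration of that refactor; NodeInfo and NodeState below are stand-ins, not the SDK types:

    package main

    import "fmt"

    type NodeState int

    const (
        Online NodeState = iota
        Maintenance
    )

    type NodeInfo struct{ status NodeState }

    func (ni *NodeInfo) SetStatus(s NodeState) { ni.status = s }

    // Callback form: the caller passes a closure just to set one field.
    func bootstrapWithSetter(ni NodeInfo, set func(*NodeInfo)) NodeInfo {
        set(&ni)
        return ni
    }

    // Value form: the state is passed directly and applied internally.
    func bootstrapWithState(ni NodeInfo, state NodeState) NodeInfo {
        ni.SetStatus(state)
        return ni
    }

    func main() {
        base := NodeInfo{}
        fmt.Println(bootstrapWithSetter(base, func(ni *NodeInfo) { ni.SetStatus(Maintenance) }))
        fmt.Println(bootstrapWithState(base, Online))
    }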


@ -1,7 +1,6 @@
package config package config
import ( import (
"slices"
"strings" "strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used // It supports only one level of nesting and is intended to be used
// to provide default values. // to provide default values.
func (x *Config) SetDefault(from *Config) { func (x *Config) SetDefault(from *Config) {
x.defaultPath = slices.Clone(from.path) x.defaultPath = make([]string, len(from.path))
copy(x.defaultPath, from.path)
} }


@ -1,27 +0,0 @@
package containerconfig
import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
const (
subsection = "container"
listStreamSubsection = "list_stream"
// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
ContainerBatchSizeDefault = 1000
)
// ContainerBatchSize returns the value of "batch_size" config parameter
// from "list_stream" subsection of "container" section.
//
// Returns ContainerBatchSizeDefault if the value is missing or if
// the value is not positive integer.
func ContainerBatchSize(c *config.Config) uint32 {
if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
return ContainerBatchSizeDefault
}
size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
if size == 0 {
return ContainerBatchSizeDefault
}
return size
}
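
The helper above reads an optional config value and falls back to a default when it is missing or zero. A stand-alone sketch of that fallback pattern over a plain map, purely illustrative:

    package main

    import "fmt"

    const containerBatchSizeDefault = 1000

    // batchSize mimics ContainerBatchSize: missing or zero values fall back
    // to the default, so callers never see a zero batch size.
    func batchSize(cfg map[string]uint32) uint32 {
        v, ok := cfg["container.list_stream.batch_size"]
        if !ok || v == 0 {
            return containerBatchSizeDefault
        }
        return v
    }

    func main() {
        fmt.Println(batchSize(nil))                                                        // 1000
        fmt.Println(batchSize(map[string]uint32{"container.list_stream.batch_size": 500})) // 500
    }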


@ -1,27 +0,0 @@
package containerconfig_test
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestContainerSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
})
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}


@ -41,10 +41,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
c.Sub(si), c.Sub(si),
) )
if sc.Mode() == mode.Disabled {
continue
}
// Path for the blobstor can't be present in the default section, because different shards // Path for the blobstor can't be present in the default section, because different shards
// must have different paths, so if it is missing, the shard is not here. // must have different paths, so if it is missing, the shard is not here.
// At the same time checking for "blobstor" section doesn't work proper // At the same time checking for "blobstor" section doesn't work proper
@ -54,6 +50,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
} }
(*config.Config)(sc).SetDefault(def) (*config.Config)(sc).SetDefault(def)
if sc.Mode() == mode.Disabled {
continue
}
if err := f(sc); err != nil { if err := f(sc); err != nil {
return err return err
} }
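
The two sides above differ only in whether the Mode() check runs before or after SetDefault; the ordering matters whenever the mode is supplied through the default section. A toy illustration with invented config types:

    package main

    import "fmt"

    type shardCfg struct {
        values   map[string]string
        defaults map[string]string
    }

    func (c *shardCfg) setDefault(def map[string]string) { c.defaults = def }

    func (c *shardCfg) mode() string {
        if v, ok := c.values["mode"]; ok {
            return v
        }
        return c.defaults["mode"] // only visible after setDefault was applied
    }

    func main() {
        def := map[string]string{"mode": "disabled"}
        sc := &shardCfg{values: map[string]string{}}

        // Checking before defaults are applied: the shard still looks enabled.
        fmt.Println("before SetDefault:", sc.mode() == "disabled") // false

        // Checking after defaults are applied: the inherited mode is seen.
        sc.setDefault(def)
        fmt.Println("after SetDefault:", sc.mode() == "disabled") // true
    }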


@ -18,22 +18,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestIterateShards(t *testing.T) {
fileConfigTest := func(c *config.Config) {
var res []string
require.NoError(t,
engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
res = append(res, sc.Metabase().Path())
return nil
}))
require.Equal(t, []string{"abc", "xyz"}, res)
}
const cfgDir = "./testdata/shards"
configtest.ForEachFileType(cfgDir, fileConfigTest)
configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
}
func TestEngineSection(t *testing.T) { func TestEngineSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) { t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig() empty := configtest.EmptyConfig()


@ -1,3 +0,0 @@
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
FROSTFS_STORAGE_SHARD_1_MODE=disabled
FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz


@ -1,13 +0,0 @@
{
"storage.shard": {
"0": {
"metabase.path": "abc"
},
"1": {
"mode": "disabled"
},
"2": {
"metabase.path": "xyz"
}
}
}


@ -1,7 +0,0 @@
storage.shard:
0:
metabase.path: abc
1:
mode: disabled
2:
metabase.path: xyz


@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
// //
// Returns PermDefault if the value is not a positive number. // Returns PermDefault if the value is not a positive number.
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
p := config.UintSafe(l.cfg, "perm") p := config.UintSafe((*config.Config)(l.cfg), "perm")
if p == 0 { if p == 0 {
p = PermDefault p = PermDefault
} }
@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
// //
// Returns false if the value is not a boolean. // Returns false if the value is not a boolean.
func (l PersistentPolicyRulesConfig) NoSync() bool { func (l PersistentPolicyRulesConfig) NoSync() bool {
return config.BoolSafe(l.cfg, "no_sync") return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
} }
// CompatibilityMode returns true if need to run node in compatibility with previous versions mode. // CompatibilityMode returns true if need to run node in compatibility with previous versions mode.


@ -1,46 +0,0 @@
package qos
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
const (
subsection = "qos"
criticalSubSection = "critical"
internalSubSection = "internal"
)
// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, criticalSubSection)
}
// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, internalSubSection)
}
func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
pubs := make(keys.PublicKeys, 0, len(strKeys))
for i := range strKeys {
pub, err := keys.NewPublicKeyFromString(strKeys[i])
if err != nil {
panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
}
pubs = append(pubs, pub)
}
return pubs
}
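
The helper above parses hex-encoded public keys via neo-go's keys package and rejects the whole list on the first bad entry (by panicking). A sketch of the same parsing that returns an error instead; the sample key is taken from the test data above:

    package main

    import (
        "fmt"

        "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    )

    // parseKeys mirrors authorizedKeys: every entry must be a valid compressed
    // public key, otherwise the whole list is rejected.
    func parseKeys(strKeys []string) (keys.PublicKeys, error) {
        pubs := make(keys.PublicKeys, 0, len(strKeys))
        for _, s := range strKeys {
            pub, err := keys.NewPublicKeyFromString(s)
            if err != nil {
                return nil, fmt.Errorf("invalid authorized key %s: %w", s, err)
            }
            pubs = append(pubs, pub)
        }
        return pubs, nil
    }

    func main() {
        pubs, err := parseKeys([]string{
            "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
        })
        fmt.Println(len(pubs), err)
    }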


@ -1,40 +0,0 @@
package qos
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Empty(t, CriticalAuthorizedKeys(empty))
require.Empty(t, InternalAuthorizedKeys(empty))
})
const path = "../../../../config/example/node"
criticalPubs := make(keys.PublicKeys, 2)
criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
internalPubs := make(keys.PublicKeys, 2)
internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
fileConfigTest := func(c *config.Config) {
require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}


@ -5,7 +5,6 @@ import (
"context" "context"
"net" "net"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
@ -29,7 +28,7 @@ import (
func initContainerService(_ context.Context, c *cfg) { func initContainerService(_ context.Context, c *cfg) {
// container wrapper that tries to invoke notary // container wrapper that tries to invoke notary
// requests if chain is configured so // requests if chain is configured so
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
fatalOnErr(err) fatalOnErr(err)
c.shared.cnrClient = wrap c.shared.cnrClient = wrap
@ -43,12 +42,11 @@ func initContainerService(_ context.Context, c *cfg) {
fatalOnErr(err) fatalOnErr(err)
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg) cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { if cacheSize > 0 {
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
} }
c.shared.frostfsidClient = frostfsIDSubjectProvider c.shared.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
@ -58,9 +56,7 @@ func initContainerService(_ context.Context, c *cfg) {
&c.key.PrivateKey, &c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr, containerService.NewAPEServer(defaultChainRouter, cnrRdr,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient, newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
containerService.NewSplitterService( containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
), ),
) )
service = containerService.NewAuditService(service, c.log, c.audit) service = containerService.NewAuditService(service, c.log, c.audit)
@ -100,7 +96,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
// TODO: use owner directly from the event after neofs-contract#256 will become resolved // TODO: use owner directly from the event after neofs-contract#256 will become resolved
// but don't forget about the profit of reading the new container and caching it: // but don't forget about the profit of reading the new container and caching it:
// creation success are most commonly tracked by polling GET op. // creation success are most commonly tracked by polling GET op.
cnr, err := cnrSrc.Get(ctx, ev.ID) cnr, err := cnrSrc.Get(ev.ID)
if err == nil { if err == nil {
containerCache.containerCache.set(ev.ID, cnr, nil) containerCache.containerCache.set(ev.ID, cnr, nil)
} else { } else {
@ -221,25 +217,20 @@ type morphContainerReader struct {
src containerCore.Source src containerCore.Source
lister interface { lister interface {
ContainersOf(context.Context, *user.ID) ([]cid.ID, error) ContainersOf(*user.ID) ([]cid.ID, error)
IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
} }
} }
func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) { func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
return x.src.Get(ctx, id) return x.src.Get(id)
} }
func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) { func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
return x.src.DeletionInfo(ctx, id) return x.src.DeletionInfo(id)
} }
func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) { func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(ctx, id) return x.lister.ContainersOf(id)
}
func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error {
return x.lister.IterateContainersOf(ctx, id, processCID)
} }
type morphContainerWriter struct { type morphContainerWriter struct {
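
The reader above exposes both a slice-returning ContainersOf and a callback-based IterateContainersOf over the same lister. A generic sketch of offering both forms; the types below are invented for the example:

    package main

    import "fmt"

    type ID = string

    type lister struct{ ids []ID }

    // ContainersOf returns everything at once.
    func (l *lister) ContainersOf() ([]ID, error) { return l.ids, nil }

    // IterateContainersOf streams IDs to a callback and stops on the first error,
    // which avoids materializing the full slice for large listings.
    func (l *lister) IterateContainersOf(process func(ID) error) error {
        for _, id := range l.ids {
            if err := process(id); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        l := &lister{ids: []ID{"cnr-1", "cnr-2"}}
        _ = l.IterateContainersOf(func(id ID) error {
            fmt.Println(id)
            return nil
        })
    }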


@ -7,12 +7,9 @@ import (
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -53,14 +50,7 @@ func initControlService(ctx context.Context, c *cfg) {
return return
} }
c.cfgControlService.server = grpc.NewServer( c.cfgControlService.server = grpc.NewServer()
grpc.ChainUnaryInterceptor(
qos.NewSetCriticalIOTagUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(),
),
// control service has no stream methods, so no stream interceptors added
)
c.onShutdown(func() { c.onShutdown(func() {
stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
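
The two sides above create the control gRPC server either bare or with a chain of QoS, metrics and tracing unary interceptors. A stand-alone sketch of server-side interceptor chaining with stock grpc-go; loggingUnary is a hypothetical placeholder for those interceptors:

    package main

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc"
    )

    // loggingUnary stands in for the QoS/metrics/tracing server interceptors;
    // it only logs the handled method and its duration.
    func loggingUnary(ctx context.Context, req any, info *grpc.UnaryServerInfo,
        handler grpc.UnaryHandler,
    ) (any, error) {
        start := time.Now()
        resp, err := handler(ctx, req)
        log.Printf("%s handled in %s", info.FullMethod, time.Since(start))
        return resp, err
    }

    func main() {
        // Interceptors run in the order listed, so a tag-setting interceptor
        // placed first sees the request before metrics and tracing do.
        srv := grpc.NewServer(
            grpc.ChainUnaryInterceptor(loggingUnary),
        )
        defer srv.Stop()
    }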


@ -1,7 +1,6 @@
package main package main
import ( import (
"context"
"strings" "strings"
"time" "time"
@ -43,7 +42,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int
} }
} }
func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -56,7 +55,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
return result.subject, result.err return result.subject, result.err
} }
subj, err := m.subjProvider.GetSubject(ctx, addr) subj, err := m.subjProvider.GetSubject(addr)
if err != nil { if err != nil {
if m.isCacheableError(err) { if m.isCacheableError(err) {
m.subjCache.Add(addr, subjectWithError{ m.subjCache.Add(addr, subjectWithError{
@ -70,7 +69,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
return subj, nil return subj, nil
} }
func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -83,7 +82,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.
return result.subject, result.err return result.subject, result.err
} }
subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr) subjExt, err := m.subjProvider.GetSubjectExtended(addr)
if err != nil { if err != nil {
if m.isCacheableError(err) { if m.isCacheableError(err) {
m.subjExtCache.Add(addr, subjectExtWithError{ m.subjExtCache.Add(addr, subjectExtWithError{


@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
@ -131,12 +130,10 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
serverOpts := []grpc.ServerOption{ serverOpts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.MaxRecvMsgSize(maxRecvMsgSize),
grpc.ChainUnaryInterceptor( grpc.ChainUnaryInterceptor(
qos.NewUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(),
), ),
grpc.ChainStreamInterceptor( grpc.ChainStreamInterceptor(
qos.NewStreamServerInterceptor(),
metrics.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(),
tracing.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(),
), ),


@ -101,7 +101,6 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })
initAccessPolicyEngine(ctx, c) initAccessPolicyEngine(ctx, c)
initAndLog(ctx, c, "access policy engine", func(c *cfg) { initAndLog(ctx, c, "access policy engine", func(c *cfg) {
@ -135,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
err := stopper(ctx) err := stopper(ctx)
if err != nil { if err != nil {
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name), c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.Error(err), zap.String("error", err.Error()),
) )
} }


@ -35,16 +35,20 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
lookupScriptHashesInNNS(c) // smart contract auto negotiation lookupScriptHashesInNNS(c) // smart contract auto negotiation
if c.cfgMorph.notaryEnabled {
err := c.cfgMorph.client.EnableNotarySupport( err := c.cfgMorph.client.EnableNotarySupport(
client.WithProxyContract( client.WithProxyContract(
c.cfgMorph.proxyScriptHash, c.cfgMorph.proxyScriptHash,
), ),
) )
fatalOnErr(err) fatalOnErr(err)
}
c.log.Info(ctx, logs.FrostFSNodeNotarySupport) c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)
wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0) wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
fatalOnErr(err) fatalOnErr(err)
var netmapSource netmap.Source var netmapSource netmap.Source
@ -96,7 +100,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
if err != nil { if err != nil {
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient, c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses), zap.Any("endpoints", addresses),
zap.Error(err), zap.String("error", err.Error()),
) )
fatalOnErr(err) fatalOnErr(err)
@ -112,9 +116,15 @@ func initMorphClient(ctx context.Context, c *cfg) {
} }
c.cfgMorph.client = cli c.cfgMorph.client = cli
c.cfgMorph.notaryEnabled = cli.ProbeNotary()
} }
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
// skip notary deposit in non-notary environments
if !c.cfgMorph.notaryEnabled {
return
}
tx, vub, err := makeNotaryDeposit(ctx, c) tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err) fatalOnErr(err)
@ -151,7 +161,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
} }
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil { if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil {
return err return err
} }
@ -168,7 +178,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil { if err != nil {
fromSideChainBlock = 0 fromSideChainBlock = 0
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
} }
subs, err = subscriber.New(ctx, &subscriber.Params{ subs, err = subscriber.New(ctx, &subscriber.Params{
@ -223,17 +233,27 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
subs map[event.Type][]event.Handler, subs map[event.Type][]event.Handler,
) { ) {
for typ, handlers := range subs { for typ, handlers := range subs {
pi := event.NotificationParserInfo{}
pi.SetType(typ)
pi.SetScriptHash(scHash)
p, ok := parsers[typ] p, ok := parsers[typ]
if !ok { if !ok {
panic(fmt.Sprintf("missing parser for event %s", typ)) panic(fmt.Sprintf("missing parser for event %s", typ))
} }
lis.RegisterNotificationHandler(event.NotificationHandlerInfo{ pi.SetParser(p)
Contract: scHash,
Type: typ, lis.SetNotificationParser(pi)
Parser: p,
Handlers: handlers, for _, h := range handlers {
}) hi := event.NotificationHandlerInfo{}
hi.SetType(typ)
hi.SetScriptHash(scHash)
hi.SetHandler(h)
lis.RegisterNotificationHandler(hi)
}
} }
} }
@ -262,6 +282,10 @@ func lookupScriptHashesInNNS(c *cfg) {
) )
for _, t := range targets { for _, t := range targets {
if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
continue // ignore proxy contract if notary disabled
}
if emptyHash.Equals(*t.h) { if emptyHash.Equals(*t.h) {
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName) *t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err) fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)


@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
} }
} }
s.setControlNetmapStatus(ctrlNetSt) s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
} }
// sets the current node state to the given value. Subsequent cfg.bootstrap // sets the current node state to the given value. Subsequent cfg.bootstrap
@ -193,15 +193,17 @@ func addNewEpochNotificationHandlers(c *cfg) {
} }
}) })
if c.cfgMorph.notaryEnabled {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c) _, _, err := makeNotaryDeposit(ctx, c)
if err != nil { if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.Error(err), zap.String("error", err.Error()),
) )
} }
}) })
} }
}
// bootstrapNode adds current node to the Network map. // bootstrapNode adds current node to the Network map.
// Must be called after initNetmapService. // Must be called after initNetmapService.
@ -239,7 +241,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state. // initNetmapState inits current Network map state.
// Must be called after Morph components initialization. // Must be called after Morph components initialization.
func initNetmapState(ctx context.Context, c *cfg) { func initNetmapState(ctx context.Context, c *cfg) {
epoch, err := c.cfgNetmap.wrapper.Epoch(ctx) epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err) fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo var ni *netmapSDK.NodeInfo
@ -278,7 +280,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
} }
func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx) nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -291,7 +293,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
} }
} }
node, err := c.netmapLocalNodeState(ctx, epoch) node, err := c.netmapLocalNodeState(epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -312,9 +314,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
return candidate, nil return candidate, nil
} }
func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
// calculate current network state // calculate current network state
nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch) nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -376,8 +378,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro
return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
} }
func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) { func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
epoch, err := c.netMapSource.Epoch(ctx) epoch, err := c.netMapSource.Epoch()
if err != nil { if err != nil {
return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err) return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
} }
@ -390,7 +392,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error {
} }
func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx) netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil { if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
} else if !netSettings.MaintenanceModeAllowed { } else if !netSettings.MaintenanceModeAllowed {
@ -423,7 +425,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.
if err != nil { if err != nil {
return err return err
} }
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash) return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res)
} }
type netInfo struct { type netInfo struct {
@ -438,7 +440,7 @@ type netInfo struct {
msPerBlockRdr func() (int64, error) msPerBlockRdr func() (int64, error)
} }
func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) { func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
magic, err := n.magic.MagicNumber() magic, err := n.magic.MagicNumber()
if err != nil { if err != nil {
return nil, err return nil, err
@ -448,7 +450,7 @@ func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.Net
ni.SetCurrentEpoch(n.netState.CurrentEpoch()) ni.SetCurrentEpoch(n.netState.CurrentEpoch())
ni.SetMagicNumber(magic) ni.SetMagicNumber(magic)
netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx) netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
if err != nil { if err != nil {
return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err) return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
} }


@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@ -54,11 +55,11 @@ type objectSvc struct {
patch *patchsvc.Service patch *patchsvc.Service
} }
func (c *cfg) MaxObjectSize(ctx context.Context) uint64 { func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx) sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil { if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.Error(err), zap.String("error", err.Error()),
) )
} }
@ -122,8 +123,8 @@ type innerRingFetcherWithNotary struct {
sidechain *morphClient.Client sidechain *morphClient.Client
} }
func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) { func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
keys, err := fn.sidechain.NeoFSAlphabetList(ctx) keys, err := fn.sidechain.NeoFSAlphabetList()
if err != nil { if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err) return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
} }
@ -136,6 +137,24 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]by
return result, nil return result, nil
} }
type innerRingFetcherWithoutNotary struct {
nm *nmClient.Client
}
func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
keys, err := f.nm.GetInnerRingList()
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
}
result := make([][]byte, 0, len(keys))
for i := range keys {
result = append(result, keys[i].Bytes())
}
return result, nil
}
func initObjectService(c *cfg) { func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state) keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
@ -168,7 +187,7 @@ func initObjectService(c *cfg) {
sPatch := createPatchSvc(sGet, sPut) sPatch := createPatchSvc(sGet, sPut)
// build service pipeline // build service pipeline
// grpc | audit | qos | <metrics> | signature | response | acl | ape | split // grpc | audit | <metrics> | signature | response | acl | ape | split
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
@ -191,8 +210,7 @@ func initObjectService(c *cfg) {
c.shared.metricsSvc = objectService.NewMetricCollector( c.shared.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService) auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit)
auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
server := objectTransportGRPC.New(auditSvc) server := objectTransportGRPC.New(auditSvc)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
@ -216,7 +234,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr) prm.MarkAsGarbage(addr)
prm.WithForceRemoval() prm.WithForceRemoval()
return ls.Inhume(ctx, prm) _, err := ls.Inhume(ctx, prm)
return err
} }
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
@ -266,9 +285,10 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr) inhumePrm.MarkAsGarbage(addr)
if err := ls.Inhume(ctx, inhumePrm); err != nil { _, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.Error(err), zap.String("error", err.Error()),
) )
} }
}), }),
@ -285,10 +305,15 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
} }
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
if c.cfgMorph.client.ProbeNotary() {
return &innerRingFetcherWithNotary{ return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client, sidechain: c.cfgMorph.client,
} }
} }
return &innerRingFetcherWithoutNotary{
nm: c.cfgNetmap.wrapper,
}
}
func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCache) *replicator.Replicator { func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCache) *replicator.Replicator {
ls := c.cfgObject.cfgLocalStorage.localStorage ls := c.cfgObject.cfgLocalStorage.localStorage
@ -475,7 +500,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...) prm.WithTarget(tombstone, addrs...)
return e.engine.Inhume(ctx, prm) _, err := e.engine.Inhume(ctx, prm)
return err
} }
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
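
Side note on the fetcher hunks above: both flavours are interchangeable behind a single abstraction, so the choice made in `createInnerRingFetcher` never leaks to callers. A minimal sketch under that assumption (the interface name and shape are illustrative, not the exact definition from the v2 service; one side of the hunk also threads a `context.Context` through, omitted here for brevity):

```go
package example

import "bytes"

// innerRingFetcher is an illustrative stand-in for the interface that both
// innerRingFetcherWithNotary and innerRingFetcherWithoutNotary satisfy.
type innerRingFetcher interface {
	InnerRingKeys() ([][]byte, error)
}

// isInnerRingKey shows a typical consumer: it only needs the key list and is
// agnostic of whether the keys came from the alphabet role or the netmap contract.
func isInnerRingKey(f innerRingFetcher, key []byte) (bool, error) {
	keys, err := f.InnerRingKeys()
	if err != nil {
		return false, err
	}
	for _, k := range keys {
		if bytes.Equal(k, key) {
			return true, nil
		}
	}
	return false, nil
}
```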

View file

@ -1,95 +0,0 @@
package main
import (
"bytes"
"context"
qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
)
type cfgQoSService struct {
netmapSource netmap.Source
logger *logger.Logger
allowedCriticalPubs [][]byte
allowedInternalPubs [][]byte
}
func initQoSService(c *cfg) {
criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
rawInternalPubs := make([][]byte, 0, len(internalPubs))
for i := range criticalPubs {
rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
}
for i := range internalPubs {
rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
}
c.cfgQoSService = cfgQoSService{
netmapSource: c.netMapSource,
logger: c.log,
allowedCriticalPubs: rawCriticalPubs,
allowedInternalPubs: rawInternalPubs,
}
}
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)
if err != nil {
s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
switch ioTag {
case qos.IOTagClient:
return ctx
case qos.IOTagCritical:
for _, pk := range s.allowedCriticalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
default:
s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}
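
The `cmd/frostfs-node/qos.go` file above centralises the decision of which IO tag an incoming request is allowed to keep. A rough sketch of how such a service is typically consumed from a unary gRPC interceptor; the interceptor constructor and the way the signer key is extracted are assumptions, not the node's actual wiring:

```go
package example

import (
	"context"

	"google.golang.org/grpc"
)

// tagAdjuster is a hypothetical view of the service above: it inspects the
// request signer's public key and rewrites the IO tag carried in the context.
type tagAdjuster interface {
	AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
}

// newTagInterceptor sketches how such a service could be attached to a gRPC
// server: adjust the tag before the real handler runs. Extracting the signer
// key from the request is elided and passed in as a callback.
func newTagInterceptor(s tagAdjuster, signerKey func(req any) []byte) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		ctx = s.AdjustIncomingTag(ctx, signerKey(req))
		return handler(ctx, req)
	}
}
```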

View file

@ -29,16 +29,16 @@ type cnrSource struct {
cli *containerClient.Client cli *containerClient.Client
} }
func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) { func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
return c.src.Get(ctx, id) return c.src.Get(id)
} }
func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) { func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
return c.src.DeletionInfo(ctx, cid) return c.src.DeletionInfo(cid)
} }
func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) { func (c cnrSource) List() ([]cid.ID, error) {
return c.cli.ContainersOf(ctx, nil) return c.cli.ContainersOf(nil)
} }
func initTreeService(c *cfg) { func initTreeService(c *cfg) {
@ -72,7 +72,7 @@ func initTreeService(c *cfg) {
) )
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) tree.RegisterTreeServiceServer(s, c.treeService)
}) })
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
@ -113,7 +113,7 @@ func initTreeService(c *cfg) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID), zap.Stringer("cid", ev.ID),
zap.Error(err)) zap.String("error", err.Error()))
} }
}) })
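
The `cnrSource` adapter above only exists to bridge the container source and contract client into the shape the tree service expects. A compressed sketch of that dependency, with placeholder types standing in for `cid.ID`, `container.Container` and `container.DelInfo`:

```go
package example

import "context"

// Placeholder types standing in for cid.ID, container.Container and
// container.DelInfo from the SDK and core packages.
type (
	ContainerID  struct{}
	Container    struct{}
	DeletionInfo struct{}
)

// containerSource approximates the read-only view of containers that the tree
// service consumes through cnrSource; the context-aware shape mirrors one
// side of the hunk above.
type containerSource interface {
	Get(ctx context.Context, id ContainerID) (*Container, error)
	DeletionInfo(ctx context.Context, id ContainerID) (*DeletionInfo, error)
	List(ctx context.Context) ([]ContainerID, error)
}
```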

View file

@ -83,9 +83,6 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10 FROSTFS_REPLICATOR_POOL_SIZE=10
# Container service section
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
# Object service section # Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
@ -225,6 +222,3 @@ FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
FROSTFS_MULTINET_BALANCER=roundrobin FROSTFS_MULTINET_BALANCER=roundrobin
FROSTFS_MULTINET_RESTRICT=false FROSTFS_MULTINET_RESTRICT=false
FROSTFS_MULTINET_FALLBACK_DELAY=350ms FROSTFS_MULTINET_FALLBACK_DELAY=350ms
FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"

View file

@ -124,11 +124,6 @@
"pool_size": 10, "pool_size": 10,
"put_timeout": "15s" "put_timeout": "15s"
}, },
"container": {
"list_stream": {
"batch_size": "500"
}
},
"object": { "object": {
"delete": { "delete": {
"tombstone_lifetime": 10 "tombstone_lifetime": 10
@ -305,19 +300,5 @@
"balancer": "roundrobin", "balancer": "roundrobin",
"restrict": false, "restrict": false,
"fallback_delay": "350ms" "fallback_delay": "350ms"
},
"qos": {
"critical": {
"authorized_keys": [
"035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
"028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
]
},
"internal": {
"authorized_keys": [
"02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2",
"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
]
}
} }
} }

View file

@ -79,8 +79,7 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
morph: morph:
dial_timeout: 30s # timeout for side chain NEO RPC client connection dial_timeout: 30s # timeout for side chain NEO RPC client connection
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Negative value disables caching. A zero value sets the default value.
# Default value: block time. It is recommended to have this value less or equal to block time. # Default value: block time. It is recommended to have this value less or equal to block time.
# Cached entities: containers, container lists, eACL tables. # Cached entities: containers, container lists, eACL tables.
container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache. container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache.
@ -109,10 +108,6 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum amount of concurrent replications pool_size: 10 # maximum amount of concurrent replications
container:
list_stream:
batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once
object: object:
delete: delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
@ -270,13 +265,3 @@ multinet:
balancer: roundrobin balancer: roundrobin
restrict: false restrict: false
fallback_delay: 350ms fallback_delay: 350ms
qos:
critical:
authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
internal:
authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag
- 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
- 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a

View file

@ -42,6 +42,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
"FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
@ -97,6 +98,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
"FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
@ -152,6 +154,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
"FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
@ -207,6 +210,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
"FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",

View file

@ -95,15 +95,19 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release ## Post-release
### Prepare and push images to a Docker registry (automated) ### Prepare and push images to Docker Hub (if not automated)
Create Docker images for all applications and push them into the container registry Create Docker images for all applications and push them into Docker Hub
(executed automatically in Forgejo Actions upon pushing a release tag): (requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
```shell ```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} $ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images $ make images
$ make push-images $ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
``` ```
### Make a proper release (if not automated) ### Make a proper release (if not automated)

View file

@ -27,7 +27,6 @@ There are some custom types used for brevity:
| `runtime` | [Runtime configuration](#runtime-section) | | `runtime` | [Runtime configuration](#runtime-section) |
| `audit` | [Audit configuration](#audit-section) | | `audit` | [Audit configuration](#audit-section) |
| `multinet` | [Multinet configuration](#multinet-section) | | `multinet` | [Multinet configuration](#multinet-section) |
| `qos` | [QoS configuration](#qos-section) |
# `control` section # `control` section
```yaml ```yaml
@ -472,20 +471,3 @@ multinet:
| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | | `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | | `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
| `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. | | `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. |
# `qos` section
```yaml
qos:
critical:
authorized_keys:
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
internal:
authorized_keys:
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
```
| Parameter | Type | Default value | Description |
| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- |
| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |

35
go.mod
View file

@ -4,12 +4,11 @@ go 1.22
require ( require (
code.gitea.io/sdk/gitea v0.17.1 code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421
git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
@ -28,7 +27,7 @@ require (
github.com/klauspost/compress v1.17.4 github.com/klauspost/compress v1.17.4
github.com/mailru/easyjson v0.7.7 github.com/mailru/easyjson v0.7.7
github.com/mr-tron/base58 v1.2.0 github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.14.0 github.com/multiformats/go-multiaddr v0.12.1
github.com/nspcc-dev/neo-go v0.106.3 github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5 github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0 github.com/panjf2000/ants/v2 v2.9.0
@ -41,15 +40,15 @@ require (
github.com/ssgreg/journald v1.0.0 github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10 go.etcd.io/bbolt v1.3.10
go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/trace v1.31.0 go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0 go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.10.0 golang.org/x/sync v0.7.0
golang.org/x/sys v0.28.0 golang.org/x/sys v0.22.0
golang.org/x/term v0.27.0 golang.org/x/term v0.21.0
google.golang.org/grpc v1.69.2 google.golang.org/grpc v1.66.2
google.golang.org/protobuf v1.36.1 google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
) )
@ -120,15 +119,15 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.30.0 // indirect golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect rsc.io/tmplfunc v0.0.3 // indirect

74
go.sum
View file

@ -1,17 +1,15 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k= git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 h1:o3iqVmbvFsfe8kpB2Hvuix6Q/tAhbiPLP91xK4lmoBQ=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 h1:sepm9FeuoInmygH1K/+3L+Yp5bJhGiVi/oGCH6Emp2c=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 h1:pP19IawSdsLCKFv7HMNfWAeH6E3uSnntKZkwka+/2+4=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
@ -108,8 +106,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -192,8 +188,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk=
github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
@ -294,22 +290,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -324,8 +318,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -345,16 +339,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -381,16 +375,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -398,8 +392,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@ -412,12 +406,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -426,8 +420,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View file

@ -146,6 +146,7 @@ const (
ClientCantGetBlockchainHeight = "can't get blockchain height" ClientCantGetBlockchainHeight = "can't get blockchain height"
ClientCantGetBlockchainHeight243 = "can't get blockchain height" ClientCantGetBlockchainHeight243 = "can't get blockchain height"
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
EventCouldNotStartListenToEvents = "could not start listen to events"
EventStopEventListenerByError = "stop event listener by error" EventStopEventListenerByError = "stop event listener by error"
EventStopEventListenerByContext = "stop event listener by context" EventStopEventListenerByContext = "stop event listener by context"
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
@ -163,9 +164,17 @@ const (
EventNotaryParserNotSet = "notary parser not set" EventNotaryParserNotSet = "notary parser not set"
EventCouldNotParseNotaryEvent = "could not parse notary event" EventCouldNotParseNotaryEvent = "could not parse notary event"
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
EventIgnoreNilEventParser = "ignore nil event parser"
EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
EventRegisteredNewEventParser = "registered new event parser" EventRegisteredNewEventParser = "registered new event parser"
EventIgnoreNilEventHandler = "ignore nil event handler"
EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
EventRegisteredNewEventHandler = "registered new event handler" EventRegisteredNewEventHandler = "registered new event handler"
EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
EventIgnoreNilBlockHandler = "ignore nil block handler"
StorageOperation = "local object storage operation" StorageOperation = "local object storage operation"
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
BlobovniczaOpeningBoltDB = "opening BoltDB" BlobovniczaOpeningBoltDB = "opening BoltDB"
@ -383,6 +392,7 @@ const (
FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown" FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing" FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
FrostFSNodeConfigurationReading = "configuration reading" FrostFSNodeConfigurationReading = "configuration reading"
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeTracingConfigationUpdated = "tracing configation updated" FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodePoolConfigurationUpdate = "adjust pool configuration" FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
@ -510,7 +520,4 @@ const (
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
WritecacheCantGetObject = "can't get an object from fstree" WritecacheCantGetObject = "can't get an object from fstree"
FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
) )

View file

@ -12,7 +12,6 @@ type ApplicationInfo struct {
func NewApplicationInfo(version string) *ApplicationInfo { func NewApplicationInfo(version string) *ApplicationInfo {
appInfo := &ApplicationInfo{ appInfo := &ApplicationInfo{
versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{ versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "app_info", Name: "app_info",
Help: "General information about the application.", Help: "General information about the application.",
}, []string{"version"}), }, []string{"version"}),
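
For context on the one-line difference above: the `Namespace` field only prefixes the exported series name. A small sketch with the Prometheus client, assuming an illustrative namespace value (the real one is a constant in the node's metrics package):

```go
package example

import "github.com/prometheus/client_golang/prometheus"

// With the namespace set, the series is exported as "frostfs_node_app_info";
// without it, the series name is just "app_info".
var appInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "frostfs_node", // assumed value for illustration
	Name:      "app_info",
	Help:      "General information about the application.",
}, []string{"version"})

// setAppVersion publishes the running version as a labelled gauge set to 1.
func setAppVersion(v string) {
	appInfo.WithLabelValues(v).Set(1)
}
```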

View file

@ -1,51 +0,0 @@
package qos
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"google.golang.org/grpc"
)
func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor {
return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String())
return handler(ctx, req)
}
}
func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor {
return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
rawTag, ok := tagging.IOTagFromContext(ctx)
if !ok {
return invoker(ctx, method, req, reply, cc, opts...)
}
tag, err := FromRawString(rawTag)
if err != nil {
tag = IOTagClient
}
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
return invoker(ctx, method, req, reply, cc, opts...)
}
}
func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor {
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
rawTag, ok := tagging.IOTagFromContext(ctx)
if !ok {
return streamer(ctx, desc, cc, method, opts...)
}
tag, err := FromRawString(rawTag)
if err != nil {
tag = IOTagClient
}
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
return streamer(ctx, desc, cc, method, opts...)
}
}
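
The interceptors in `internal/qos/grpc.go` above are plain gRPC unary interceptors; a minimal sketch of where such interceptors are usually registered, with pass-through stand-ins instead of the real tag logic (the constructor names and the wiring are assumptions):

```go
package example

import (
	"context"

	"google.golang.org/grpc"
)

// Pass-through stand-ins: a real server-side interceptor would pin the
// critical IO tag, and the client-side one would downgrade background,
// policer and writecache tags to internal before the call leaves the process.
func newServerInterceptor() grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		return handler(ctx, req)
	}
}

func newClientInterceptor() grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}

// wire shows the usual registration points: server options for incoming
// requests and dial options for outgoing calls.
func wire() (*grpc.Server, []grpc.DialOption) {
	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(newServerInterceptor()))
	dialOpts := []grpc.DialOption{grpc.WithChainUnaryInterceptor(newClientInterceptor())}
	return srv, dialOpts
}
```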

View file

@ -1,39 +0,0 @@
package qos
import "fmt"
type IOTag string
const (
IOTagClient IOTag = "client"
IOTagInternal IOTag = "internal"
IOTagBackground IOTag = "background"
IOTagWritecache IOTag = "writecache"
IOTagPolicer IOTag = "policer"
IOTagCritical IOTag = "critical"
ioTagUnknown IOTag = ""
)
func FromRawString(s string) (IOTag, error) {
switch s {
case string(IOTagCritical):
return IOTagCritical, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagInternal):
return IOTagInternal, nil
case string(IOTagBackground):
return IOTagBackground, nil
case string(IOTagWritecache):
return IOTagWritecache, nil
case string(IOTagPolicer):
return IOTagPolicer, nil
default:
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
}
}
func (t IOTag) String() string {
return string(t)
}
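
A short usage sketch for the tag parser above, reduced to two tags so it stands alone; the full tag set and the fallback behaviour live in the qos package and its callers:

```go
package example

import "fmt"

// IOTag and FromRawString are a self-contained reprise of the parser above,
// limited to two tags for brevity.
type IOTag string

const (
	IOTagClient   IOTag = "client"
	IOTagCritical IOTag = "critical"
)

func FromRawString(s string) (IOTag, error) {
	switch s {
	case string(IOTagClient):
		return IOTagClient, nil
	case string(IOTagCritical):
		return IOTagCritical, nil
	default:
		return "", fmt.Errorf("unknown tag %s", s)
	}
}

// resolveTag falls back to the client tag on anything unknown, mirroring how
// callers typically treat parse failures.
func resolveTag(raw string) IOTag {
	tag, err := FromRawString(raw)
	if err != nil {
		return IOTagClient
	}
	return tag
}
```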

View file

@ -31,7 +31,9 @@ type RPCActorProvider interface {
type ProxyVerificationContractStorage struct { type ProxyVerificationContractStorage struct {
rpcActorProvider RPCActorProvider rpcActorProvider RPCActorProvider
cosigners []actor.SignerAccount acc *wallet.Account
proxyScriptHash util.Uint160
policyScriptHash util.Uint160 policyScriptHash util.Uint160
} }
@ -39,27 +41,12 @@ type ProxyVerificationContractStorage struct {
var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil) var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage { func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
acc := wallet.NewAccountFromPrivateKey(key)
return &ProxyVerificationContractStorage{ return &ProxyVerificationContractStorage{
rpcActorProvider: rpcActorProvider, rpcActorProvider: rpcActorProvider,
cosigners: []actor.SignerAccount{ acc: wallet.NewAccountFromPrivateKey(key),
{
Signer: transaction.Signer{ proxyScriptHash: proxyScriptHash,
Account: proxyScriptHash,
Scopes: transaction.CustomContracts,
AllowedContracts: []util.Uint160{policyScriptHash},
},
Account: notary.FakeContractAccount(proxyScriptHash),
},
{
Signer: transaction.Signer{
Account: acc.Contract.ScriptHash(),
Scopes: transaction.CalledByEntry,
},
Account: acc,
},
},
policyScriptHash: policyScriptHash, policyScriptHash: policyScriptHash,
} }
@ -77,7 +64,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) { func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor() rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
act, err := actor.New(rpcActor, contractStorage.cosigners) act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -111,16 +98,31 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na
// ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as cosigners. // ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as cosigners.
func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) { func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
rpcActor := contractStorage.rpcActorProvider.GetRPCActor() // contractStorageActor is reconstructed for each method invocation because the RPCActor's (essentially the WSClient's) connection may become invalidated, but
inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor} // ProxyVerificationContractStorage does not manage reconnections.
return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) contractStorageActor, err := contractStorage.newContractStorageActor()
if err != nil {
return nil, err
}
return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
} }
type invokerAdapter struct { func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount {
*invoker.Invoker return []actor.SignerAccount{
rpcInvoker invoker.RPCInvoke {
Signer: transaction.Signer{
Account: proxyScriptHash,
Scopes: transaction.CustomContracts,
AllowedContracts: []util.Uint160{policyScriptHash},
},
Account: notary.FakeContractAccount(proxyScriptHash),
},
{
Signer: transaction.Signer{
Account: acc.Contract.ScriptHash(),
Scopes: transaction.CalledByEntry,
},
Account: acc,
},
} }
func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
return n.rpcInvoker
} }
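
The hunk above keeps the same two-signer arrangement in both variants: the Proxy contract signs through a notary fake contract account whose CustomContracts scope is restricted to the Policy contract, while the storage node's own account signs with CalledByEntry. A condensed sketch of building an actor from that arrangement, assuming an already-connected actor.RPCActor; the package and function names here are illustrative, the signer layout is taken from the hunk:

package policyactor

import (
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
)

// newPolicyActor builds an actor that signs as the Proxy contract (via a
// notary fake contract account, allowed to call only the Policy contract)
// plus the node's own account, matching the cosigners shown above.
func newPolicyActor(rpcActor actor.RPCActor, key *keys.PrivateKey, proxyHash, policyHash util.Uint160) (*actor.Actor, error) {
	acc := wallet.NewAccountFromPrivateKey(key)
	signers := []actor.SignerAccount{
		{
			Signer: transaction.Signer{
				Account:          proxyHash,
				Scopes:           transaction.CustomContracts,
				AllowedContracts: []util.Uint160{policyHash},
			},
			Account: notary.FakeContractAccount(proxyHash),
		},
		{
			Signer: transaction.Signer{
				Account: acc.Contract.ScriptHash(),
				Scopes:  transaction.CalledByEntry,
			},
			Account: acc,
		},
	}
	return actor.New(rpcActor, signers)
}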

View file

@ -1,7 +1,6 @@
package request package request
import ( import (
"context"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
@ -13,9 +12,9 @@ import (
) )
// FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID. // FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID.
func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
reqProps := make(map[string]string) reqProps := make(map[string]string)
subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
if err != nil { if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err) return nil, fmt.Errorf("get subject error: %w", err)
@ -37,8 +36,8 @@ func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfs
} }
// Groups return the actor's group ids from frostfsid contract. // Groups return the actor's group ids from frostfsid contract.
func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
if err != nil { if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err) return nil, fmt.Errorf("get subject error: %w", err)

View file

@ -1,7 +1,6 @@
package container package container
import ( import (
"context"
"sync" "sync"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
@ -20,7 +19,7 @@ type infoValue struct {
} }
type InfoProvider interface { type InfoProvider interface {
Info(ctx context.Context, id cid.ID) (Info, error) Info(id cid.ID) (Info, error)
} }
type infoProvider struct { type infoProvider struct {
@ -44,13 +43,13 @@ func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
} }
} }
func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) { func (r *infoProvider) Info(id cid.ID) (Info, error) {
v, found := r.tryGetFromCache(id) v, found := r.tryGetFromCache(id)
if found { if found {
return v.info, v.err return v.info, v.err
} }
return r.getFromSource(ctx, id) return r.getFromSource(id)
} }
func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
@ -61,7 +60,7 @@ func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
return value, found return value, found
} }
func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) { func (r *infoProvider) getFromSource(id cid.ID) (Info, error) {
r.kl.Lock(id) r.kl.Lock(id)
defer r.kl.Unlock(id) defer r.kl.Unlock(id)
@ -76,11 +75,11 @@ func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, erro
return Info{}, r.sourceErr return Info{}, r.sourceErr
} }
cnr, err := r.source.Get(ctx, id) cnr, err := r.source.Get(id)
var civ infoValue var civ infoValue
if err != nil { if err != nil {
if client.IsErrContainerNotFound(err) { if client.IsErrContainerNotFound(err) {
removed, err := WasRemoved(ctx, r.source, id) removed, err := WasRemoved(r.source, id)
if err != nil { if err != nil {
civ.err = err civ.err = err
} else { } else {
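
The provider above memoizes per-container results and serializes cache misses with a per-key lock, so concurrent Info calls for the same container hit the source only once. A simplified, generic sketch of that shape; the real code also caches errors and deletion info and uses the utilSync key-locker, which is replaced here with a plain mutex-per-key map, and all names are illustrative:

package cache

import "sync"

// keyedCache memoizes expensive lookups and ensures only one in-flight fetch
// per key; later callers for the same key reuse the stored result.
type keyedCache[K comparable, V any] struct {
	mu    sync.Mutex
	locks map[K]*sync.Mutex
	vals  map[K]V
	fetch func(K) (V, error)
}

func newKeyedCache[K comparable, V any](fetch func(K) (V, error)) *keyedCache[K, V] {
	return &keyedCache[K, V]{
		locks: make(map[K]*sync.Mutex),
		vals:  make(map[K]V),
		fetch: fetch,
	}
}

func (c *keyedCache[K, V]) Get(k K) (V, error) {
	// Fast path: the value is already cached.
	c.mu.Lock()
	if v, ok := c.vals[k]; ok {
		c.mu.Unlock()
		return v, nil
	}
	// Take (or create) the per-key lock, then release the global one.
	kl, ok := c.locks[k]
	if !ok {
		kl = &sync.Mutex{}
		c.locks[k] = kl
	}
	c.mu.Unlock()

	kl.Lock()
	defer kl.Unlock()

	// Re-check under the key lock: another goroutine may have filled it.
	c.mu.Lock()
	if v, ok := c.vals[k]; ok {
		c.mu.Unlock()
		return v, nil
	}
	c.mu.Unlock()

	v, err := c.fetch(k)
	if err != nil {
		return v, err
	}
	c.mu.Lock()
	c.vals[k] = v
	c.mu.Unlock()
	return v, nil
}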

View file

@ -1,8 +1,6 @@
package container package container
import ( import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@ -43,9 +41,9 @@ type Source interface {
// //
// Implementations must not retain the container pointer and modify // Implementations must not retain the container pointer and modify
// the container through it. // the container through it.
Get(ctx context.Context, cid cid.ID) (*Container, error) Get(cid.ID) (*Container, error)
DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error) DeletionInfo(cid.ID) (*DelInfo, error)
} }
// EACL groups information about the FrostFS container's extended ACL stored in // EACL groups information about the FrostFS container's extended ACL stored in

View file

@ -1,7 +1,6 @@
package container package container
import ( import (
"context"
"errors" "errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -11,8 +10,8 @@ import (
// WasRemoved checks whether the container ever existed or // WasRemoved checks whether the container ever existed or
// it just has not been created yet at the current epoch. // it just has not been created yet at the current epoch.
func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { func WasRemoved(s Source, cid cid.ID) (bool, error) {
_, err := s.DeletionInfo(ctx, cid) _, err := s.DeletionInfo(cid)
if err == nil { if err == nil {
return true, nil return true, nil
} }

View file

@ -1,8 +1,6 @@
package frostfsid package frostfsid
import ( import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/util"
) )
@ -13,6 +11,6 @@ const (
// SubjectProvider interface provides methods to get subject from FrostfsID contract. // SubjectProvider interface provides methods to get subject from FrostfsID contract.
type SubjectProvider interface { type SubjectProvider interface {
GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) GetSubject(util.Uint160) (*client.Subject, error)
GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error)
} }

View file

@ -1,8 +1,6 @@
package netmap package netmap
import ( import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
) )
@ -18,7 +16,7 @@ type Source interface {
// //
// Implementations must not retain the network map pointer and modify // Implementations must not retain the network map pointer and modify
// the network map through it. // the network map through it.
GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) GetNetMap(diff uint64) (*netmap.NetMap, error)
// GetNetMapByEpoch reads network map by the epoch number from the storage. // GetNetMapByEpoch reads network map by the epoch number from the storage.
// It returns the pointer to the requested network map and any error encountered. // It returns the pointer to the requested network map and any error encountered.
@ -27,21 +25,21 @@ type Source interface {
// //
// Implementations must not retain the network map pointer and modify // Implementations must not retain the network map pointer and modify
// the network map through it. // the network map through it.
GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error)
// Epoch reads the current epoch from the storage. // Epoch reads the current epoch from the storage.
// It returns the number of the current epoch and any error encountered. // It returns the number of the current epoch and any error encountered.
// //
// Must return exactly one non-default value. // Must return exactly one non-default value.
Epoch(ctx context.Context) (uint64, error) Epoch() (uint64, error)
} }
// GetLatestNetworkMap requests and returns the latest network map from the storage. // GetLatestNetworkMap requests and returns the latest network map from the storage.
func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) {
return src.GetNetMap(ctx, 0) return src.GetNetMap(0)
} }
// GetPreviousNetworkMap requests and returns previous from the latest network map from the storage. // GetPreviousNetworkMap requests and returns previous from the latest network map from the storage.
func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) {
return src.GetNetMap(ctx, 1) return src.GetNetMap(1)
} }
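
In the variant that threads context.Context through the Source interface, callers can bound every netmap read with a deadline or cancellation. A small caller-side sketch within the same package; readEpochAndMap and the five-second timeout are illustrative, Source and GetLatestNetworkMap are the declarations shown above:

package netmap

import (
	"context"
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// readEpochAndMap reads the current epoch and the latest network map with a
// single deadline covering both underlying Sidechain calls.
func readEpochAndMap(ctx context.Context, src Source) (uint64, *netmap.NetMap, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	epoch, err := src.Epoch(ctx)
	if err != nil {
		return 0, nil, fmt.Errorf("read current epoch: %w", err)
	}
	nm, err := GetLatestNetworkMap(ctx, src) // equivalent to src.GetNetMap(ctx, 0)
	if err != nil {
		return 0, nil, fmt.Errorf("read latest network map: %w", err)
	}
	return epoch, nm, nil
}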

View file

@ -199,7 +199,7 @@ func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSD
cnrIDBin := make([]byte, sha256.Size) cnrIDBin := make([]byte, sha256.Size)
cnrID.Encode(cnrIDBin) cnrID.Encode(cnrIDBin)
cnr, err := v.containers.Get(ctx, cnrID) cnr, err := v.containers.Get(cnrID)
if err != nil { if err != nil {
return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err) return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
} }

View file

@ -578,7 +578,7 @@ type testIRSource struct {
irNodes [][]byte irNodes [][]byte
} }
func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) { func (s *testIRSource) InnerRingKeys() ([][]byte, error) {
return s.irNodes, nil return s.irNodes, nil
} }
@ -586,14 +586,14 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container containers map[cid.ID]*container.Container
} }
func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found { if cnr, found := s.containers[cnrID]; found {
return cnr, nil return cnr, nil
} }
return nil, fmt.Errorf("container not found") return nil, fmt.Errorf("container not found")
} }
func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
return nil, nil return nil, nil
} }
@ -602,20 +602,20 @@ type testNetmapSource struct {
currentEpoch uint64 currentEpoch uint64
} }
func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch { if diff >= s.currentEpoch {
return nil, fmt.Errorf("invalid diff") return nil, fmt.Errorf("invalid diff")
} }
return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) return s.GetNetMapByEpoch(s.currentEpoch - diff)
} }
func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found { if nm, found := s.netmaps[epoch]; found {
return nm, nil return nm, nil
} }
return nil, fmt.Errorf("netmap not found") return nil, fmt.Errorf("netmap not found")
} }
func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) { func (s *testNetmapSource) Epoch() (uint64, error) {
return s.currentEpoch, nil return s.currentEpoch, nil
} }

View file

@ -18,7 +18,7 @@ import (
) )
type InnerRing interface { type InnerRing interface {
InnerRingKeys(ctx context.Context) ([][]byte, error) InnerRingKeys() ([][]byte, error)
} }
type SenderClassifier struct { type SenderClassifier struct {
@ -63,11 +63,11 @@ func (c SenderClassifier) Classify(
} }
func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes) isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil { if err != nil {
// do not throw error, try best case matching // do not throw error, try best case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
zap.Error(err)) zap.String("error", err.Error()))
} else if isInnerRingNode { } else if isInnerRingNode {
return &ClassifyResult{ return &ClassifyResult{
Role: acl.RoleInnerRing, Role: acl.RoleInnerRing,
@ -78,13 +78,13 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
binCnr := make([]byte, sha256.Size) binCnr := make([]byte, sha256.Size)
idCnr.Encode(binCnr) idCnr.Encode(binCnr)
isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr) isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr)
if err != nil { if err != nil {
// error might happen if request has `RoleOther` key and placement // error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so // is not possible for previous epoch, so
// do not throw error, try best case matching // do not throw error, try best case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode, c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
zap.Error(err)) zap.String("error", err.Error()))
} else if isContainerNode { } else if isContainerNode {
return &ClassifyResult{ return &ClassifyResult{
Role: acl.RoleContainer, Role: acl.RoleContainer,
@ -99,8 +99,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
}, nil }, nil
} }
func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) { func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
innerRingKeys, err := c.innerRing.InnerRingKeys(ctx) innerRingKeys, err := c.innerRing.InnerRingKeys()
if err != nil { if err != nil {
return false, err return false, err
} }
@ -116,11 +116,10 @@ func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (boo
} }
func (c SenderClassifier) isContainerKey( func (c SenderClassifier) isContainerKey(
ctx context.Context,
owner, idCnr []byte, owner, idCnr []byte,
cnr container.Container, cnr container.Container,
) (bool, error) { ) (bool, error) {
nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
if err != nil { if err != nil {
return false, err return false, err
} }
@ -134,7 +133,7 @@ func (c SenderClassifier) isContainerKey(
// then check previous netmap, this can happen in-between epoch change // then check previous netmap, this can happen in-between epoch change
// when node migrates data from last epoch container // when node migrates data from last epoch container
nm, err = core.GetPreviousNetworkMap(ctx, c.netmap) nm, err = core.GetPreviousNetworkMap(c.netmap)
if err != nil { if err != nil {
return false, err return false, err
} }
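
The classifier falls back from the current network map to the previous one, which covers the window right after an epoch change when a node may still serve data placed under the old map. A schematic sketch of that fallback; the netmapSource interface is a minimal stand-in for the context-aware Source shown earlier in this diff, and lookupKey stands in for the placement-based membership check performed by the real code:

package classify

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// netmapSource is a minimal stand-in for the netmap Source interface.
type netmapSource interface {
	GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error)
}

// isContainerKey reports whether owner belongs to a node responsible for the
// container now or in the previous epoch.
func isContainerKey(ctx context.Context, src netmapSource, owner []byte, lookupKey func(*netmap.NetMap, []byte) bool) (bool, error) {
	nm, err := src.GetNetMap(ctx, 0) // current netmap first
	if err != nil {
		return false, err
	}
	if lookupKey(nm, owner) {
		return true, nil
	}
	// In-between an epoch change a node may still migrate data placed under
	// the previous epoch's map, so check diff=1 before giving up.
	nm, err = src.GetNetMap(ctx, 1)
	if err != nil {
		return false, err
	}
	return lookupKey(nm, owner), nil
}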

View file

@ -8,6 +8,7 @@ type (
// ContractProcessor interface defines functions for binding event producers // ContractProcessor interface defines functions for binding event producers
// such as event.Listener and Timers with contract processor. // such as event.Listener and Timers with contract processor.
ContractProcessor interface { ContractProcessor interface {
ListenerNotificationParsers() []event.NotificationParserInfo
ListenerNotificationHandlers() []event.NotificationHandlerInfo ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo ListenerNotaryHandlers() []event.NotaryHandlerInfo
@ -15,6 +16,11 @@ type (
) )
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) { func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
// register notification parsers
for _, parser := range p.ListenerNotificationParsers() {
l.SetNotificationParser(parser)
}
// register notification handlers // register notification handlers
for _, handler := range p.ListenerNotificationHandlers() { for _, handler := range p.ListenerNotificationHandlers() {
l.RegisterNotificationHandler(handler) l.RegisterNotificationHandler(handler)
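
connectListenerWithProcessor relies only on the ContractProcessor methods, so a processor with nothing to register can satisfy the interface with empty returns, as several processors later in this diff do for notification parsers and handlers. A minimal, hypothetical implementation for illustration; the event import path is assumed from the repository layout, since the hunks here only reference the package as "event":

package processors

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" // assumed path
)

// noopProcessor registers nothing; real processors return their parser and
// handler descriptors from these methods instead of nil.
type noopProcessor struct{}

func (noopProcessor) ListenerNotificationParsers() []event.NotificationParserInfo { return nil }

func (noopProcessor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil }

func (noopProcessor) ListenerNotaryParsers() []event.NotaryParserInfo { return nil }

func (noopProcessor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return nil }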

View file

@ -1,8 +1,6 @@
package innerring package innerring
import ( import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@ -49,12 +47,12 @@ type IrFetcherWithoutNotary struct {
// InnerRingKeys fetches list of innerring keys from NeoFSAlphabet // InnerRingKeys fetches list of innerring keys from NeoFSAlphabet
// role in the sidechain. // role in the sidechain.
func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) {
return fN.cli.NeoFSAlphabetList(ctx) return fN.cli.NeoFSAlphabetList()
} }
// InnerRingKeys fetches list of innerring keys from netmap contract // InnerRingKeys fetches list of innerring keys from netmap contract
// in the sidechain. // in the sidechain.
func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) {
return f.nm.GetInnerRingList(ctx) return f.nm.GetInnerRingList()
} }

View file

@ -1,7 +1,6 @@
package innerring package innerring
import ( import (
"context"
"fmt" "fmt"
"sync" "sync"
"time" "time"
@ -11,7 +10,7 @@ import (
type ( type (
irFetcher interface { irFetcher interface {
InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) InnerRingKeys() (keys.PublicKeys, error)
} }
committeeFetcher interface { committeeFetcher interface {
@ -46,7 +45,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK
} }
} }
func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) { func (s *innerRingIndexer) update() (ind indexes, err error) {
s.RLock() s.RLock()
if time.Since(s.lastAccess) < s.timeout { if time.Since(s.lastAccess) < s.timeout {
@ -63,7 +62,7 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
return s.ind, nil return s.ind, nil
} }
innerRing, err := s.irFetcher.InnerRingKeys(ctx) innerRing, err := s.irFetcher.InnerRingKeys()
if err != nil { if err != nil {
return indexes{}, err return indexes{}, err
} }
@ -82,8 +81,8 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
return s.ind, nil return s.ind, nil
} }
func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) { func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
ind, err := s.update(ctx) ind, err := s.update()
if err != nil { if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err) return 0, fmt.Errorf("can't update index state: %w", err)
} }
@ -91,8 +90,8 @@ func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
return ind.innerRingIndex, nil return ind.innerRingIndex, nil
} }
func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) { func (s *innerRingIndexer) InnerRingSize() (int32, error) {
ind, err := s.update(ctx) ind, err := s.update()
if err != nil { if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err) return 0, fmt.Errorf("can't update index state: %w", err)
} }
@ -100,8 +99,8 @@ func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
return ind.innerRingSize, nil return ind.innerRingSize, nil
} }
func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) { func (s *innerRingIndexer) AlphabetIndex() (int32, error) {
ind, err := s.update(ctx) ind, err := s.update()
if err != nil { if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err) return 0, fmt.Errorf("can't update index state: %w", err)
} }
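
The indexer above recomputes the alphabet and inner-ring indexes at most once per timeout window: a read lock first checks lastAccess, and only a stale cache takes the write lock and calls the fetchers. A stripped-down sketch of that read-through pattern with illustrative names:

package cache

import (
	"sync"
	"time"
)

// ttlValue recomputes a value through fetch at most once per ttl; concurrent
// readers inside the window share the cached copy under a read lock.
type ttlValue[T any] struct {
	mu         sync.RWMutex
	val        T
	lastUpdate time.Time
	ttl        time.Duration
	fetch      func() (T, error)
}

func (c *ttlValue[T]) Get() (T, error) {
	c.mu.RLock()
	if time.Since(c.lastUpdate) < c.ttl {
		v := c.val
		c.mu.RUnlock()
		return v, nil
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	// Double-check: another goroutine may have refreshed the value while we
	// were waiting for the write lock.
	if time.Since(c.lastUpdate) < c.ttl {
		return c.val, nil
	}
	v, err := c.fetch()
	if err != nil {
		return c.val, err
	}
	c.val = v
	c.lastUpdate = time.Now()
	return c.val, nil
}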

View file

@ -1,7 +1,6 @@
package innerring package innerring
import ( import (
"context"
"fmt" "fmt"
"sync/atomic" "sync/atomic"
"testing" "testing"
@ -38,15 +37,15 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second) indexer := newInnerRingIndexer(cf, irf, key, time.Second)
idx, err := indexer.AlphabetIndex(context.Background()) idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index") require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(1), idx, "invalid alphabet index") require.Equal(t, int32(1), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index") require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(2), idx, "invalid IR index") require.Equal(t, int32(2), idx, "invalid IR index")
size, err := indexer.InnerRingSize(context.Background()) size, err := indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size") require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(3), size, "invalid IR size") require.Equal(t, int32(3), size, "invalid IR size")
}) })
@ -57,11 +56,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second) indexer := newInnerRingIndexer(cf, irf, key, time.Second)
idx, err := indexer.AlphabetIndex(context.Background()) idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index") require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index") require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(0), idx, "invalid IR index") require.Equal(t, int32(0), idx, "invalid IR index")
}) })
@ -72,11 +71,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second) indexer := newInnerRingIndexer(cf, irf, key, time.Second)
idx, err := indexer.AlphabetIndex(context.Background()) idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index") require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(0), idx, "invalid alphabet index") require.Equal(t, int32(0), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index") require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index") require.Equal(t, int32(-1), idx, "invalid IR index")
}) })
@ -101,30 +100,30 @@ func TestIndexerCachesIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second) indexer := newInnerRingIndexer(cf, irf, key, time.Second)
idx, err := indexer.AlphabetIndex(context.Background()) idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index") require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index") require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index") require.Equal(t, int32(-1), idx, "invalid IR index")
size, err := indexer.InnerRingSize(context.Background()) size, err := indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size") require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(0), size, "invalid IR size")
require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count")
require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count")
idx, err = indexer.AlphabetIndex(context.Background()) idx, err = indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index") require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index") require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index") require.Equal(t, int32(-1), idx, "invalid IR index")
size, err = indexer.InnerRingSize(context.Background()) size, err = indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size") require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(0), size, "invalid IR size")
@ -133,15 +132,15 @@ func TestIndexerCachesIndexes(t *testing.T) {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
idx, err = indexer.AlphabetIndex(context.Background()) idx, err = indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index") require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index") require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index") require.Equal(t, int32(-1), idx, "invalid IR index")
size, err = indexer.InnerRingSize(context.Background()) size, err = indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size") require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(0), size, "invalid IR size")
@ -166,15 +165,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second) indexer := newInnerRingIndexer(cf, irf, key, time.Second)
idx, err := indexer.AlphabetIndex(context.Background()) idx, err := indexer.AlphabetIndex()
require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed") require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index") require.Equal(t, int32(0), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index") require.Equal(t, int32(0), idx, "invalid IR index")
size, err := indexer.InnerRingSize(context.Background()) size, err := indexer.InnerRingSize()
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(0), size, "invalid IR size")
@ -190,15 +189,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer = newInnerRingIndexer(cf, irf, key, time.Second) indexer = newInnerRingIndexer(cf, irf, key, time.Second)
idx, err = indexer.AlphabetIndex(context.Background()) idx, err = indexer.AlphabetIndex()
require.ErrorContains(t, err, "test IR error", "error from commitee not throwed") require.ErrorContains(t, err, "test IR error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index") require.Equal(t, int32(0), idx, "invalid alphabet index")
idx, err = indexer.InnerRingIndex(context.Background()) idx, err = indexer.InnerRingIndex()
require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index") require.Equal(t, int32(0), idx, "invalid IR index")
size, err = indexer.InnerRingSize(context.Background()) size, err = indexer.InnerRingSize()
require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(0), size, "invalid IR size")
} }
@ -220,7 +219,7 @@ type testIRFetcher struct {
calls atomic.Int32 calls atomic.Int32
} }
func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
f.calls.Add(1) f.calls.Add(1)
return f.keys, f.err return f.keys, f.err
} }

View file

@ -38,7 +38,10 @@ import (
func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
alphaSync event.Handler, alphaSync event.Handler,
) error { ) error {
locodeValidator := s.newLocodeValidator(cfg) locodeValidator, err := s.newLocodeValidator(cfg)
if err != nil {
return err
}
netSettings := (*networkSettings)(s.netmapClient) netSettings := (*networkSettings)(s.netmapClient)
@ -48,7 +51,6 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
poolSize := cfg.GetInt("workers.netmap") poolSize := cfg.GetInt("workers.netmap")
s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
var err error
s.netmapProcessor, err = netmap.New(&netmap.Params{ s.netmapProcessor, err = netmap.New(&netmap.Params{
Log: s.log, Log: s.log,
Metrics: s.irMetrics, Metrics: s.irMetrics,
@ -98,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil { if err != nil {
fromMainChainBlock = 0 fromMainChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err)) s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
} }
mainnetChain.from = fromMainChainBlock mainnetChain.from = fromMainChainBlock
@ -378,6 +380,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
// form morph container client's options // form morph container client's options
morphCnrOpts := make([]container.Option, 0, 3) morphCnrOpts := make([]container.Option, 0, 3)
morphCnrOpts = append(morphCnrOpts, morphCnrOpts = append(morphCnrOpts,
container.TryNotary(),
container.AsAlphabet(), container.AsAlphabet(),
) )
@ -387,12 +390,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
} }
s.containerClient = result.CnrClient s.containerClient = result.CnrClient
s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet()) s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
if err != nil { if err != nil {
return nil, err return nil, err
} }
s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet()) s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -454,7 +457,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil { if err != nil {
fromSideChainBlock = 0 fromSideChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
} }
morphChain := &chainParams{ morphChain := &chainParams{

View file

@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
if err != nil { if err != nil {
// we don't stop inner ring execution on this error // we don't stop inner ring execution on this error
s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators, s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
zap.Error(err)) zap.String("error", err.Error()))
} }
s.tickInitialExpoch(ctx) s.tickInitialExpoch(ctx)
@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) {
for _, c := range s.closers { for _, c := range s.closers {
if err := c(); err != nil { if err := c(); err != nil {
s.log.Warn(ctx, logs.InnerringCloserError, s.log.Warn(ctx, logs.InnerringCloserError,
zap.Error(err), zap.String("error", err.Error()),
) )
} }
} }
@ -575,19 +575,19 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe
func (s *Server) initConfigFromBlockchain(ctx context.Context) error { func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
// get current epoch // get current epoch
epoch, err := s.netmapClient.Epoch(ctx) epoch, err := s.netmapClient.Epoch()
if err != nil { if err != nil {
return fmt.Errorf("can't read epoch number: %w", err) return fmt.Errorf("can't read epoch number: %w", err)
} }
// get current epoch duration // get current epoch duration
epochDuration, err := s.netmapClient.EpochDuration(ctx) epochDuration, err := s.netmapClient.EpochDuration()
if err != nil { if err != nil {
return fmt.Errorf("can't read epoch duration: %w", err) return fmt.Errorf("can't read epoch duration: %w", err)
} }
// get balance precision // get balance precision
balancePrecision, err := s.balanceClient.Decimals(ctx) balancePrecision, err := s.balanceClient.Decimals()
if err != nil { if err != nil {
return fmt.Errorf("can't read balance contract precision: %w", err) return fmt.Errorf("can't read balance contract precision: %w", err)
} }
@ -597,7 +597,7 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
s.precision.SetBalancePrecision(balancePrecision) s.precision.SetBalancePrecision(balancePrecision)
// get next epoch delta tick // get next epoch delta tick
s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx) s.initialEpochTickDelta, err = s.nextEpochBlockDelta()
if err != nil { if err != nil {
return err return err
} }
@ -613,8 +613,8 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
return nil return nil
} }
func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) { func (s *Server) nextEpochBlockDelta() (uint32, error) {
epochBlock, err := s.netmapClient.LastEpochBlock(ctx) epochBlock, err := s.netmapClient.LastEpochBlock()
if err != nil { if err != nil {
return 0, fmt.Errorf("can't read last epoch block: %w", err) return 0, fmt.Errorf("can't read last epoch block: %w", err)
} }

View file

@ -9,7 +9,7 @@ import (
"github.com/spf13/viper" "github.com/spf13/viper"
) )
func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator { func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
locodeDB := locodebolt.New(locodebolt.Prm{ locodeDB := locodebolt.New(locodebolt.Prm{
Path: cfg.GetString("locode.db.path"), Path: cfg.GetString("locode.db.path"),
}, },
@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
return irlocode.New(irlocode.Prm{ return irlocode.New(irlocode.Prm{
DB: (*locodeBoltDBWrapper)(locodeDB), DB: (*locodeBoltDBWrapper)(locodeDB),
}) }), nil
} }
type locodeBoltEntryWrapper struct { type locodeBoltEntryWrapper struct {

View file

@ -1,7 +1,6 @@
package innerring package innerring
import ( import (
"context"
"fmt" "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@ -18,8 +17,8 @@ type networkSettings netmapclient.Client
// MaintenanceModeAllowed requests network configuration from the Sidechain // MaintenanceModeAllowed requests network configuration from the Sidechain
// and check allowance of storage node's maintenance mode according to it. // and check allowance of storage node's maintenance mode according to it.
// Always returns state.ErrMaintenanceModeDisallowed. // Always returns state.ErrMaintenanceModeDisallowed.
func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error { func (s *networkSettings) MaintenanceModeAllowed() error {
allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx) allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed()
if err != nil { if err != nil {
return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err) return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
} else if allowed { } else if allowed {

View file

@ -279,6 +279,6 @@ type testNetmapClient struct {
netmap *netmap.NetMap netmap *netmap.NetMap
} }
func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
return c.netmap, nil return c.netmap, nil
} }

View file

@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
// there is no signature collecting, so we don't need extra fee // there is no signature collecting, so we don't need extra fee
_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod) _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil { if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err)) ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return false return false
} }
@ -44,10 +44,10 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
return true return true
} }
networkMap, err := ap.netmapClient.NetMap(ctx) networkMap, err := ap.netmapClient.NetMap()
if err != nil { if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
zap.Error(err)) zap.String("error", err.Error()))
return false return false
} }
@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil { if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey, ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
zap.Error(err)) zap.String("error", err.Error()))
continue continue
} }
@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
ap.log.Warn(ctx, logs.AlphabetCantTransferGas, ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()), zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)), zap.Int64("amount", int64(gasPerNode)),
zap.Error(err), zap.String("error", err.Error()),
) )
} }
} }
@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet, ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog), zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)), zap.Int64("amount", int64(gasPerNode)),
zap.Error(err), zap.String("error", err.Error()),
) )
} }
} }
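
Both logging forms seen throughout this diff attach the message under the "error" key; zap.Error(err) is simply the shorthand, and it also tolerates a nil error by emitting no field at all. A tiny comparison sketch:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	err := errors.New("boom")

	// For a plain error these two lines produce the same structured field:
	// error="boom".
	log.Warn("emit failed", zap.String("error", err.Error()))
	log.Warn("emit failed", zap.Error(err))

	// With a nil error, zap.Error(nil) adds no field, whereas calling
	// err.Error() on a nil error would panic; that is one practical
	// difference between the two forms.
}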

View file

@ -36,7 +36,7 @@ type (
} }
netmapClient interface { netmapClient interface {
NetMap(ctx context.Context) (*netmap.NetMap, error) NetMap() (*netmap.NetMap, error)
} }
morphClient interface { morphClient interface {
@ -114,6 +114,11 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
ap.pwLock.Unlock() ap.pwLock.Unlock()
} }
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
return nil
}
// ListenerNotificationHandlers for the 'event.Listener' event producer. // ListenerNotificationHandlers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil return nil

View file

@ -88,16 +88,32 @@ func New(p *Params) (*Processor, error) {
}, nil }, nil
} }
// ListenerNotificationParsers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
var parsers []event.NotificationParserInfo
// new lock event
lock := event.NotificationParserInfo{}
lock.SetType(lockNotification)
lock.SetScriptHash(bp.balanceSC)
lock.SetParser(balanceEvent.ParseLock)
parsers = append(parsers, lock)
return parsers
}
// ListenerNotificationHandlers for the 'event.Listener' event producer. // ListenerNotificationHandlers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return []event.NotificationHandlerInfo{ var handlers []event.NotificationHandlerInfo
{
Contract: bp.balanceSC, // lock handler
Type: lockNotification, lock := event.NotificationHandlerInfo{}
Parser: balanceEvent.ParseLock, lock.SetType(lockNotification)
Handlers: []event.Handler{bp.handleLock}, lock.SetScriptHash(bp.balanceSC)
}, lock.SetHandler(bp.handleLock)
} handlers = append(handlers, lock)
return handlers
} }
// ListenerNotaryParsers for the 'event.Listener' event producer. // ListenerNotaryParsers for the 'event.Listener' event producer.

View file

@ -1,7 +1,6 @@
package container package container
import ( import (
"context"
"crypto/ecdsa" "crypto/ecdsa"
"errors" "errors"
"fmt" "fmt"
@ -46,7 +45,7 @@ type signatureVerificationData struct {
// - v.binPublicKey is a public session key // - v.binPublicKey is a public session key
// - session context corresponds to the container and verb in v // - session context corresponds to the container and verb in v
// - session is "alive" // - session is "alive"
func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error { func (cp *Processor) verifySignature(v signatureVerificationData) error {
var err error var err error
var key frostfsecdsa.PublicKeyRFC6979 var key frostfsecdsa.PublicKeyRFC6979
keyProvided := v.binPublicKey != nil keyProvided := v.binPublicKey != nil
@ -59,7 +58,7 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
} }
if len(v.binTokenSession) > 0 { if len(v.binTokenSession) > 0 {
return cp.verifyByTokenSession(ctx, v, &key, keyProvided) return cp.verifyByTokenSession(v, &key, keyProvided)
} }
if keyProvided { if keyProvided {
@ -78,8 +77,8 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
return errors.New("signature is invalid or calculated with the key not bound to the container owner") return errors.New("signature is invalid or calculated with the key not bound to the container owner")
} }
func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error { func (cp *Processor) checkTokenLifetime(token session.Container) error {
curEpoch, err := cp.netState.Epoch(ctx) curEpoch, err := cp.netState.Epoch()
if err != nil { if err != nil {
return fmt.Errorf("could not read current epoch: %w", err) return fmt.Errorf("could not read current epoch: %w", err)
} }
@ -91,7 +90,7 @@ func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Conta
return nil return nil
} }
func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
var tok session.Container var tok session.Container
err := tok.Unmarshal(v.binTokenSession) err := tok.Unmarshal(v.binTokenSession)
@ -119,7 +118,7 @@ func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerifi
return errors.New("owner differs with token owner") return errors.New("owner differs with token owner")
} }
err = cp.checkTokenLifetime(ctx, tok) err = cp.checkTokenLifetime(tok)
if err != nil { if err != nil {
return fmt.Errorf("check session lifetime: %w", err) return fmt.Errorf("check session lifetime: %w", err)
} }

View file

@ -170,11 +170,11 @@ type testNetworkState struct {
epoch uint64 epoch uint64
} }
func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) { func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) {
return s.homHashDisabled, nil return s.homHashDisabled, nil
} }
func (s *testNetworkState) Epoch(context.Context) (uint64, error) { func (s *testNetworkState) Epoch() (uint64, error) {
return s.epoch, nil return s.epoch, nil
} }
@ -187,7 +187,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 {
return c.contractAddress return c.contractAddress
} }
func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) {
key := hex.EncodeToString(cid) key := hex.EncodeToString(cid)
if cont, found := c.get[key]; found { if cont, found := c.get[key]; found {
return cont, nil return cont, nil
@ -237,6 +237,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction)
type testFrostFSIDClient struct{} type testFrostFSIDClient struct{}
func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
return &frostfsidclient.Subject{}, nil return &frostfsidclient.Subject{}, nil
} }

View file

@ -47,10 +47,10 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
e: put, e: put,
} }
err := cp.checkPutContainer(ctx, pctx) err := cp.checkPutContainer(pctx)
if err != nil { if err != nil {
cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
zap.Error(err), zap.String("error", err.Error()),
) )
return false return false
@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil { if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer, cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
zap.Error(err), zap.String("error", err.Error()),
) )
return false return false
} }
@ -66,8 +66,8 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
return true return true
} }
func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error { func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
binCnr := pctx.e.Container() binCnr := ctx.e.Container()
var cnr containerSDK.Container var cnr containerSDK.Container
err := cnr.Unmarshal(binCnr) err := cnr.Unmarshal(binCnr)
@ -75,12 +75,12 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
return fmt.Errorf("invalid binary container: %w", err) return fmt.Errorf("invalid binary container: %w", err)
} }
err = cp.verifySignature(ctx, signatureVerificationData{ err = cp.verifySignature(signatureVerificationData{
ownerContainer: cnr.Owner(), ownerContainer: cnr.Owner(),
verb: session.VerbContainerPut, verb: session.VerbContainerPut,
binTokenSession: pctx.e.SessionToken(), binTokenSession: ctx.e.SessionToken(),
binPublicKey: pctx.e.PublicKey(), binPublicKey: ctx.e.PublicKey(),
signature: pctx.e.Signature(), signature: ctx.e.Signature(),
signedData: binCnr, signedData: binCnr,
}) })
if err != nil { if err != nil {
@ -88,13 +88,13 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
} }
// check homomorphic hashing setting // check homomorphic hashing setting
err = checkHomomorphicHashing(ctx, cp.netState, cnr) err = checkHomomorphicHashing(cp.netState, cnr)
if err != nil { if err != nil {
return fmt.Errorf("incorrect homomorphic hashing setting: %w", err) return fmt.Errorf("incorrect homomorphic hashing setting: %w", err)
} }
// check native name and zone // check native name and zone
err = cp.checkNNS(ctx, pctx, cnr) err = cp.checkNNS(ctx, cnr)
if err != nil { if err != nil {
return fmt.Errorf("NNS: %w", err) return fmt.Errorf("NNS: %w", err)
} }
@ -110,10 +110,10 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
return true return true
} }
err := cp.checkDeleteContainer(ctx, e) err := cp.checkDeleteContainer(e)
if err != nil { if err != nil {
cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
zap.Error(err), zap.String("error", err.Error()),
) )
return false return false
@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil { if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer, cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
zap.Error(err), zap.String("error", err.Error()),
) )
return false return false
@ -130,7 +130,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
return true return true
} }
func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error { func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error {
binCnr := e.ContainerID() binCnr := e.ContainerID()
var idCnr cid.ID var idCnr cid.ID
@ -141,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
} }
// receive owner of the related container // receive owner of the related container
cnr, err := cp.cnrClient.Get(ctx, binCnr) cnr, err := cp.cnrClient.Get(binCnr)
if err != nil { if err != nil {
return fmt.Errorf("could not receive the container: %w", err) return fmt.Errorf("could not receive the container: %w", err)
} }
err = cp.verifySignature(ctx, signatureVerificationData{ err = cp.verifySignature(signatureVerificationData{
ownerContainer: cnr.Value.Owner(), ownerContainer: cnr.Value.Owner(),
verb: session.VerbContainerDelete, verb: session.VerbContainerDelete,
idContainerSet: true, idContainerSet: true,
@ -163,21 +163,21 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
return nil return nil
} }
func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error { func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error {
// fetch domain info // fetch domain info
pctx.d = containerSDK.ReadDomain(cnr) ctx.d = containerSDK.ReadDomain(cnr)
// if PutNamed event => check if values in container correspond to args // if PutNamed event => check if values in container correspond to args
if named, ok := pctx.e.(interface { if named, ok := ctx.e.(interface {
Name() string Name() string
Zone() string Zone() string
}); ok { }); ok {
if name := named.Name(); name != pctx.d.Name() { if name := named.Name(); name != ctx.d.Name() {
return fmt.Errorf("names differ %s/%s", name, pctx.d.Name()) return fmt.Errorf("names differ %s/%s", name, ctx.d.Name())
} }
if zone := named.Zone(); zone != pctx.d.Zone() { if zone := named.Zone(); zone != ctx.d.Zone() {
return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone()) return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone())
} }
} }
@ -186,12 +186,12 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn
return fmt.Errorf("could not get container owner address: %w", err) return fmt.Errorf("could not get container owner address: %w", err)
} }
subject, err := cp.frostFSIDClient.GetSubject(ctx, addr) subject, err := cp.frostFSIDClient.GetSubject(addr)
if err != nil { if err != nil {
return fmt.Errorf("could not get subject from FrostfsID contract: %w", err) return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
} }
namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns") namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns")
if !hasNamespace { if !hasNamespace {
return nil return nil
} }
@ -203,8 +203,8 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn
return nil return nil
} }
func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error { func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error {
netSetting, err := ns.HomomorphicHashDisabled(ctx) netSetting, err := ns.HomomorphicHashDisabled()
if err != nil { if err != nil {
return fmt.Errorf("could not get setting in contract: %w", err) return fmt.Errorf("could not get setting in contract: %w", err)
} }
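
The namespace check above relies on strings.CutSuffix to both strip the ".ns" suffix and report whether it was present, so zones without the suffix skip the FrostfsID namespace comparison entirely. A tiny illustration of that call; the zone values are made up:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Zone carrying a namespace: the suffix is stripped and hasNamespace is true.
	namespace, hasNamespace := strings.CutSuffix("tenant-a.ns", ".ns")
	fmt.Println(namespace, hasNamespace) // tenant-a true

	// Plain zone: the string comes back unchanged and hasNamespace is false,
	// so the caller returns early without consulting the FrostfsID subject.
	namespace, hasNamespace = strings.CutSuffix("container", ".ns")
	fmt.Println(namespace, hasNamespace) // container false
}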

View file

@@ -25,7 +25,7 @@ type (
 	ContClient interface {
 		ContractAddress() util.Uint160
-		Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+		Get(cid []byte) (*containercore.Container, error)
 	}

 	MorphClient interface {
@@ -33,7 +33,7 @@ type (
 	}

 	FrostFSIDClient interface {
-		GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error)
+		GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error)
 	}

 	// Processor of events produced by container contract in the sidechain.
@@ -68,7 +68,7 @@ type NetworkState interface {
 	//
 	// Must return any error encountered
 	// which did not allow reading the value.
-	Epoch(ctx context.Context) (uint64, error)
+	Epoch() (uint64, error)

 	// HomomorphicHashDisabled must return boolean that
 	// represents homomorphic network state:
@@ -76,7 +76,7 @@ type NetworkState interface {
 	// * false if hashing is enabled.
 	//
 	// which did not allow reading the value.
-	HomomorphicHashDisabled(ctx context.Context) (bool, error)
+	HomomorphicHashDisabled() (bool, error)
 }

 // New creates a container contract processor instance.
@@ -118,6 +118,11 @@ func New(p *Params) (*Processor, error) {
 	}, nil
 }

+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+	return nil
+}
+
 // ListenerNotificationHandlers for the 'event.Listener' event producer.
 func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
 	return nil
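The interface changes above are the core of this file's diff: on this branch NetworkState (and likewise ContClient and FrostFSIDClient) read the chain state without an explicit context, whereas the base branch threads context.Context through every call. A small sketch of the two shapes, assuming nothing beyond what the hunks show (the Base/ThisBranch suffixes are made up for the comparison):

package netstate

import "context"

// NetworkStateBase mirrors the base-branch shape: every contract read
// receives a caller-supplied context for cancellation and deadlines.
type NetworkStateBase interface {
	Epoch(ctx context.Context) (uint64, error)
	HomomorphicHashDisabled(ctx context.Context) (bool, error)
}

// NetworkStateThisBranch mirrors the shape used on this branch: the reads are
// parameterless, so cancellation must be handled inside the implementation.
type NetworkStateThisBranch interface {
	Epoch() (uint64, error)
	HomomorphicHashDisabled() (bool, error)
}

Callers written against the base shape, such as ns.Epoch(ctx), will not compile against this branch, which is why the processor, its dependencies and the tests all change together in this comparison.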
View file
@@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
 	err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
 	if err != nil {
 		np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
-			zap.Error(err))
+			zap.String("error", err.Error()))

 		return false
 	}
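The only change in this hunk is how the transfer error is attached to the log entry. A runnable comparison of the two forms (uses go.uber.org/zap, nothing frostfs-specific):

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	err := errors.New("can't transfer GAS")

	// Base-branch style: typed error field under the "error" key.
	log.Error("can't transfer native gas to receiver", zap.Error(err))

	// Style on this branch: the error flattened to a plain string field.
	log.Error("can't transfer native gas to receiver", zap.String("error", err.Error()))
}

Both lines produce an "error" field in the output; the zap.Error form additionally keeps the error value itself, so error-aware encoders can attach extra detail when the error type supports it.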
View file
@@ -142,34 +142,70 @@ func New(p *Params) (*Processor, error) {
 	}, nil
 }

+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+	var (
+		parsers = make([]event.NotificationParserInfo, 0, 6)
+
+		p event.NotificationParserInfo
+	)
+
+	p.SetScriptHash(np.frostfsContract)
+
+	// deposit event
+	p.SetType(event.TypeFromString(depositNotification))
+	p.SetParser(frostfsEvent.ParseDeposit)
+	parsers = append(parsers, p)
+
+	// withdraw event
+	p.SetType(event.TypeFromString(withdrawNotification))
+	p.SetParser(frostfsEvent.ParseWithdraw)
+	parsers = append(parsers, p)
+
+	// cheque event
+	p.SetType(event.TypeFromString(chequeNotification))
+	p.SetParser(frostfsEvent.ParseCheque)
+	parsers = append(parsers, p)
+
+	// config event
+	p.SetType(event.TypeFromString(configNotification))
+	p.SetParser(frostfsEvent.ParseConfig)
+	parsers = append(parsers, p)
+
+	return parsers
+}
+
 // ListenerNotificationHandlers for the 'event.Listener' event producer.
 func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
-	return []event.NotificationHandlerInfo{
-		{
-			Contract: np.frostfsContract,
-			Type:     event.TypeFromString(depositNotification),
-			Parser:   frostfsEvent.ParseDeposit,
-			Handlers: []event.Handler{np.handleDeposit},
-		},
-		{
-			Contract: np.frostfsContract,
-			Type:     event.TypeFromString(withdrawNotification),
-			Parser:   frostfsEvent.ParseWithdraw,
-			Handlers: []event.Handler{np.handleWithdraw},
-		},
-		{
-			Contract: np.frostfsContract,
-			Type:     event.TypeFromString(chequeNotification),
-			Parser:   frostfsEvent.ParseCheque,
-			Handlers: []event.Handler{np.handleCheque},
-		},
-		{
-			Contract: np.frostfsContract,
-			Type:     event.TypeFromString(configNotification),
-			Parser:   frostfsEvent.ParseConfig,
-			Handlers: []event.Handler{np.handleConfig},
-		},
-	}
+	var (
+		handlers = make([]event.NotificationHandlerInfo, 0, 6)
+
+		h event.NotificationHandlerInfo
+	)
+
+	h.SetScriptHash(np.frostfsContract)
+
+	// deposit handler
+	h.SetType(event.TypeFromString(depositNotification))
+	h.SetHandler(np.handleDeposit)
+	handlers = append(handlers, h)
+
+	// withdraw handler
+	h.SetType(event.TypeFromString(withdrawNotification))
+	h.SetHandler(np.handleWithdraw)
+	handlers = append(handlers, h)
+
+	// cheque handler
+	h.SetType(event.TypeFromString(chequeNotification))
+	h.SetHandler(np.handleCheque)
+	handlers = append(handlers, h)
+
+	// config handler
+	h.SetType(event.TypeFromString(configNotification))
+	h.SetHandler(np.handleConfig)
+	handlers = append(handlers, h)
+
+	return handlers
 }

 // ListenerNotaryParsers for the 'event.Listener' event producer.
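The base branch registers frostfs contract subscriptions with struct literals (Contract, Type, Parser and Handlers in one value), while this branch goes back to the setter-based NotificationHandlerInfo/NotificationParserInfo API and re-adds ListenerNotificationParsers. The sketch below models the two registration styles with simplified local types; HandlerInfo and Handler are assumptions made only for this illustration, not the real event package.

package main

import "fmt"

// Handler and HandlerInfo are simplified stand-ins for the event package types.
type Handler func(payload string)

type HandlerInfo struct {
	Contract string
	Type     string
	Handlers []Handler
}

// literalStyle mimics the base branch: one self-contained literal per event.
func literalStyle(contract string) []HandlerInfo {
	return []HandlerInfo{
		{Contract: contract, Type: "Deposit", Handlers: []Handler{func(p string) { fmt.Println("deposit:", p) }}},
		{Contract: contract, Type: "Withdraw", Handlers: []Handler{func(p string) { fmt.Println("withdraw:", p) }}},
	}
}

// setterStyle mimics this branch: one mutable value is filled in, appended,
// then partially overwritten for the next subscription.
func setterStyle(contract string) []HandlerInfo {
	handlers := make([]HandlerInfo, 0, 2)

	var h HandlerInfo
	h.Contract = contract

	h.Type = "Deposit"
	h.Handlers = []Handler{func(p string) { fmt.Println("deposit:", p) }}
	handlers = append(handlers, h)

	h.Type = "Withdraw"
	h.Handlers = []Handler{func(p string) { fmt.Println("withdraw:", p) }}
	handlers = append(handlers, h)

	return handlers
}

func main() {
	fmt.Println(len(literalStyle("frostfs")), len(setterStyle("frostfs")))
}

Since HandlerInfo is appended by value, each append stores a copy, so reusing the single h for the next subscription is safe as long as every field is reassigned before the next append; forgetting to reset a field is the main pitfall of this style.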
View file
@@ -236,7 +236,7 @@ type testIRFetcher struct {
 	publicKeys keys.PublicKeys
 }

-func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
 	return f.publicKeys, nil
 }

@@ -266,7 +266,7 @@ type testMainnetClient struct {
 	designateHash util.Uint160
 }

-func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) {
+func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
 	return c.alphabetKeys, nil
 }
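The test doubles shed the context parameter together with the interfaces they stand in for. A small sketch of how such a stub can be pinned to the interface at compile time; irFetcher is an assumed local name, and keys.PublicKeys comes from the neo-go module that frostfs-node already depends on.

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

// irFetcher is an assumed name for the dependency the stub satisfies.
type irFetcher interface {
	InnerRingKeys() (keys.PublicKeys, error)
}

type testIRFetcher struct {
	publicKeys keys.PublicKeys
}

func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
	return f.publicKeys, nil
}

// Compile-time check that the stub still matches the interface after the
// signature change on this branch.
var _ irFetcher = (*testIRFetcher)(nil)

func main() {
	f := &testIRFetcher{}
	got, err := f.InnerRingKeys()
	fmt.Println(len(got), err) // 0 <nil>
}

The blank-identifier assertion fails at compile time if the stub and the interface drift apart again.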
Some files were not shown because too many files have changed in this diff.